/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
18 #include <linux/config.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mii.h>
36 #include <linux/if_vlan.h>
38 #include <linux/tcp.h>
39 #include <linux/workqueue.h>
40 #include <linux/prefetch.h>
41 #include <linux/dma-mapping.h>
43 #include <net/checksum.h>
45 #include <asm/system.h>
47 #include <asm/byteorder.h>
48 #include <asm/uaccess.h>
51 #include <asm/idprom.h>
52 #include <asm/oplib.h>
/* Compile-time feature switches.  The extract lost the #else/#endif lines;
 * restored here.  NOTE(review): the NETIF_F_TSO guard on TG3_TSO_SUPPORT is
 * reconstructed — verify against the original tree.
 */
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#ifdef NETIF_F_TSO
#define TG3_TSO_SUPPORT	1
#else
#define TG3_TSO_SUPPORT	0
#endif
#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.49"
#define DRV_MODULE_RELDATE	"Feb 2, 2006"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
/* NOTE(review): the NETIF_MSG_* body of this mask was dropped by the
 * extraction; restored with the conventional set — verify.
 */
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
/* Free TX slots = pending minus in-flight (prod - cons, ring-masked). */
#define TX_BUFFS_AVAIL(TP)						\
	((TP)->tx_pending -						\
	 (((TP)->tx_prod - (TP)->tx_cons) & (TG3_TX_RING_SIZE - 1)))
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH		(TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6
143 static char version
[] __devinitdata
=
144 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
146 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
147 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
148 MODULE_LICENSE("GPL");
149 MODULE_VERSION(DRV_MODULE_VERSION
);
151 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
152 module_param(tg3_debug
, int, 0);
153 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
155 static struct pci_device_id tg3_pci_tbl
[] = {
156 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
,
157 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
158 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
,
159 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
160 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
,
161 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
162 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
,
163 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
164 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
,
165 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
166 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
,
167 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
168 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
,
169 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
170 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
,
171 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
172 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
,
173 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
174 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
,
175 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
176 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
,
177 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
178 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
,
179 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
180 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
,
181 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
182 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
,
183 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
184 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
,
185 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
186 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
,
187 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
188 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
,
189 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
190 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
,
191 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
192 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
,
193 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
194 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
,
195 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
196 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
,
197 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
198 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
,
199 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
200 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5720
,
201 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
202 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
,
203 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
204 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750
,
205 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
206 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
,
207 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
208 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750M
,
209 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
210 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
,
211 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
212 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
,
213 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
214 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
,
215 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
216 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
,
217 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
218 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
,
219 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
220 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
,
221 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
222 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
,
223 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
224 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
,
225 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
226 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
,
227 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
228 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
,
229 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
230 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
,
231 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
232 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
,
233 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
234 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
,
235 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
236 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
,
237 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
238 { PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
,
239 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
240 { PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
,
241 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
242 { PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
,
243 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
244 { PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
,
245 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
246 { PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
,
247 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
248 { PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
,
249 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
250 { PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
,
251 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, 0UL },
255 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
258 const char string
[ETH_GSTRING_LEN
];
259 } ethtool_stats_keys
[TG3_NUM_STATS
] = {
262 { "rx_ucast_packets" },
263 { "rx_mcast_packets" },
264 { "rx_bcast_packets" },
266 { "rx_align_errors" },
267 { "rx_xon_pause_rcvd" },
268 { "rx_xoff_pause_rcvd" },
269 { "rx_mac_ctrl_rcvd" },
270 { "rx_xoff_entered" },
271 { "rx_frame_too_long_errors" },
273 { "rx_undersize_packets" },
274 { "rx_in_length_errors" },
275 { "rx_out_length_errors" },
276 { "rx_64_or_less_octet_packets" },
277 { "rx_65_to_127_octet_packets" },
278 { "rx_128_to_255_octet_packets" },
279 { "rx_256_to_511_octet_packets" },
280 { "rx_512_to_1023_octet_packets" },
281 { "rx_1024_to_1522_octet_packets" },
282 { "rx_1523_to_2047_octet_packets" },
283 { "rx_2048_to_4095_octet_packets" },
284 { "rx_4096_to_8191_octet_packets" },
285 { "rx_8192_to_9022_octet_packets" },
292 { "tx_flow_control" },
294 { "tx_single_collisions" },
295 { "tx_mult_collisions" },
297 { "tx_excessive_collisions" },
298 { "tx_late_collisions" },
299 { "tx_collide_2times" },
300 { "tx_collide_3times" },
301 { "tx_collide_4times" },
302 { "tx_collide_5times" },
303 { "tx_collide_6times" },
304 { "tx_collide_7times" },
305 { "tx_collide_8times" },
306 { "tx_collide_9times" },
307 { "tx_collide_10times" },
308 { "tx_collide_11times" },
309 { "tx_collide_12times" },
310 { "tx_collide_13times" },
311 { "tx_collide_14times" },
312 { "tx_collide_15times" },
313 { "tx_ucast_packets" },
314 { "tx_mcast_packets" },
315 { "tx_bcast_packets" },
316 { "tx_carrier_sense_errors" },
320 { "dma_writeq_full" },
321 { "dma_write_prioq_full" },
325 { "rx_threshold_hit" },
327 { "dma_readq_full" },
328 { "dma_read_prioq_full" },
329 { "tx_comp_queue_full" },
331 { "ring_set_send_prod_index" },
332 { "ring_status_update" },
334 { "nic_avoided_irqs" },
335 { "nic_tx_threshold_hit" }
339 const char string
[ETH_GSTRING_LEN
];
340 } ethtool_test_keys
[TG3_NUM_TEST
] = {
341 { "nvram test (online) " },
342 { "link test (online) " },
343 { "register test (offline)" },
344 { "memory test (offline)" },
345 { "loopback test (offline)" },
346 { "interrupt test (offline)" },
349 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
351 writel(val
, tp
->regs
+ off
);
354 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
356 return (readl(tp
->regs
+ off
));
359 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
363 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
364 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
365 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
366 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
369 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
371 writel(val
, tp
->regs
+ off
);
372 readl(tp
->regs
+ off
);
375 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
380 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
381 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
382 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
383 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
387 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
391 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
392 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
393 TG3_64BIT_REG_LOW
, val
);
396 if (off
== (MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
)) {
397 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
398 TG3_64BIT_REG_LOW
, val
);
402 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
403 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
404 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
405 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
407 /* In indirect mode when disabling interrupts, we also need
408 * to clear the interrupt bit in the GRC local ctrl register.
410 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
412 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
413 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
417 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
422 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
423 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
424 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
425 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
429 /* usec_wait specifies the wait time in usec when writing to certain registers
430 * where it is unsafe to read back the register without some delay.
431 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
432 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
434 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
436 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) ||
437 (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
438 /* Non-posted methods */
439 tp
->write32(tp
, off
, val
);
442 tg3_write32(tp
, off
, val
);
447 /* Wait again after the read for the posted method to guarantee that
448 * the wait time is met.
454 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
456 tp
->write32_mbox(tp
, off
, val
);
457 if (!(tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
) &&
458 !(tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
459 tp
->read32_mbox(tp
, off
);
462 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
464 void __iomem
*mbox
= tp
->regs
+ off
;
466 if (tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
)
468 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
/* Register/mailbox access helpers; all expand against a local 'tp'. */
#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
483 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
487 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
488 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
489 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
491 /* Always leave this as zero. */
492 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
493 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
496 static void tg3_write_mem_fast(struct tg3
*tp
, u32 off
, u32 val
)
498 /* If no workaround is needed, write to mem space directly */
499 if (tp
->write32
!= tg3_write_indirect_reg32
)
500 tw32(NIC_SRAM_WIN_BASE
+ off
, val
);
502 tg3_write_mem(tp
, off
, val
);
505 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
509 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
510 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
511 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
513 /* Always leave this as zero. */
514 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
515 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
518 static void tg3_disable_ints(struct tg3
*tp
)
520 tw32(TG3PCI_MISC_HOST_CTRL
,
521 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
522 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
525 static inline void tg3_cond_int(struct tg3
*tp
)
527 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
528 (tp
->hw_status
->status
& SD_STATUS_UPDATED
))
529 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
532 static void tg3_enable_ints(struct tg3
*tp
)
537 tw32(TG3PCI_MISC_HOST_CTRL
,
538 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
539 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
540 (tp
->last_tag
<< 24));
544 static inline unsigned int tg3_has_work(struct tg3
*tp
)
546 struct tg3_hw_status
*sblk
= tp
->hw_status
;
547 unsigned int work_exists
= 0;
549 /* check for phy events */
550 if (!(tp
->tg3_flags
&
551 (TG3_FLAG_USE_LINKCHG_REG
|
552 TG3_FLAG_POLL_SERDES
))) {
553 if (sblk
->status
& SD_STATUS_LINK_CHG
)
556 /* check for RX/TX work to do */
557 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
||
558 sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
)
565 * similar to tg3_enable_ints, but it accurately determines whether there
566 * is new work pending and can return without flushing the PIO write
567 * which reenables interrupts
569 static void tg3_restart_ints(struct tg3
*tp
)
571 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
575 /* When doing tagged status, this work check is unnecessary.
576 * The last_tag we write above tells the chip which piece of
577 * work we've completed.
579 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
581 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
582 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
585 static inline void tg3_netif_stop(struct tg3
*tp
)
587 tp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
588 netif_poll_disable(tp
->dev
);
589 netif_tx_disable(tp
->dev
);
592 static inline void tg3_netif_start(struct tg3
*tp
)
594 netif_wake_queue(tp
->dev
);
595 /* NOTE: unconditional netif_wake_queue is only appropriate
596 * so long as all callers are assured to have free tx slots
597 * (such as after tg3_init_hw)
599 netif_poll_enable(tp
->dev
);
600 tp
->hw_status
->status
|= SD_STATUS_UPDATED
;
604 static void tg3_switch_clocks(struct tg3
*tp
)
606 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
609 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
612 orig_clock_ctrl
= clock_ctrl
;
613 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
614 CLOCK_CTRL_CLKRUN_OENABLE
|
616 tp
->pci_clock_ctrl
= clock_ctrl
;
618 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
619 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
620 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
621 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
623 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
624 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
626 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
628 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
629 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
632 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
/* Maximum MI_COM busy-poll iterations for PHY register access. */
#define PHY_BUSY_LOOPS	5000
637 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
643 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
645 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
651 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
652 MI_COM_PHY_ADDR_MASK
);
653 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
654 MI_COM_REG_ADDR_MASK
);
655 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
657 tw32_f(MAC_MI_COM
, frame_val
);
659 loops
= PHY_BUSY_LOOPS
;
662 frame_val
= tr32(MAC_MI_COM
);
664 if ((frame_val
& MI_COM_BUSY
) == 0) {
666 frame_val
= tr32(MAC_MI_COM
);
674 *val
= frame_val
& MI_COM_DATA_MASK
;
678 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
679 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
686 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
692 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
694 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
698 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
699 MI_COM_PHY_ADDR_MASK
);
700 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
701 MI_COM_REG_ADDR_MASK
);
702 frame_val
|= (val
& MI_COM_DATA_MASK
);
703 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
705 tw32_f(MAC_MI_COM
, frame_val
);
707 loops
= PHY_BUSY_LOOPS
;
710 frame_val
= tr32(MAC_MI_COM
);
711 if ((frame_val
& MI_COM_BUSY
) == 0) {
713 frame_val
= tr32(MAC_MI_COM
);
723 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
724 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
731 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
735 if (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
)
738 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x7007) &&
739 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
))
740 tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
741 (val
| (1 << 15) | (1 << 4)));
744 static int tg3_bmcr_reset(struct tg3
*tp
)
749 /* OK, reset it, and poll the BMCR_RESET bit until it
750 * clears or we time out.
752 phy_control
= BMCR_RESET
;
753 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
759 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
763 if ((phy_control
& BMCR_RESET
) == 0) {
775 static int tg3_wait_macro_done(struct tg3
*tp
)
782 if (!tg3_readphy(tp
, 0x16, &tmp32
)) {
783 if ((tmp32
& 0x1000) == 0)
793 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
795 static const u32 test_pat
[4][6] = {
796 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
797 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
798 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
799 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
803 for (chan
= 0; chan
< 4; chan
++) {
806 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
807 (chan
* 0x2000) | 0x0200);
808 tg3_writephy(tp
, 0x16, 0x0002);
810 for (i
= 0; i
< 6; i
++)
811 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
814 tg3_writephy(tp
, 0x16, 0x0202);
815 if (tg3_wait_macro_done(tp
)) {
820 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
821 (chan
* 0x2000) | 0x0200);
822 tg3_writephy(tp
, 0x16, 0x0082);
823 if (tg3_wait_macro_done(tp
)) {
828 tg3_writephy(tp
, 0x16, 0x0802);
829 if (tg3_wait_macro_done(tp
)) {
834 for (i
= 0; i
< 6; i
+= 2) {
837 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
838 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
839 tg3_wait_macro_done(tp
)) {
845 if (low
!= test_pat
[chan
][i
] ||
846 high
!= test_pat
[chan
][i
+1]) {
847 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
848 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
849 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
859 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
863 for (chan
= 0; chan
< 4; chan
++) {
866 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
867 (chan
* 0x2000) | 0x0200);
868 tg3_writephy(tp
, 0x16, 0x0002);
869 for (i
= 0; i
< 6; i
++)
870 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
871 tg3_writephy(tp
, 0x16, 0x0202);
872 if (tg3_wait_macro_done(tp
))
879 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
881 u32 reg32
, phy9_orig
;
882 int retries
, do_phy_reset
, err
;
888 err
= tg3_bmcr_reset(tp
);
894 /* Disable transmitter and interrupt. */
895 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
899 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
901 /* Set full-duplex, 1000 mbps. */
902 tg3_writephy(tp
, MII_BMCR
,
903 BMCR_FULLDPLX
| TG3_BMCR_SPEED1000
);
905 /* Set to master mode. */
906 if (tg3_readphy(tp
, MII_TG3_CTRL
, &phy9_orig
))
909 tg3_writephy(tp
, MII_TG3_CTRL
,
910 (MII_TG3_CTRL_AS_MASTER
|
911 MII_TG3_CTRL_ENABLE_AS_MASTER
));
913 /* Enable SM_DSP_CLOCK and 6dB. */
914 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
916 /* Block the PHY control access. */
917 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
918 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0800);
920 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
925 err
= tg3_phy_reset_chanpat(tp
);
929 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
930 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0000);
932 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
933 tg3_writephy(tp
, 0x16, 0x0000);
935 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
936 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
937 /* Set Extended packet length bit for jumbo frames */
938 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4400);
941 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
944 tg3_writephy(tp
, MII_TG3_CTRL
, phy9_orig
);
946 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
948 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
955 /* This will reset the tigon3 PHY if there is no valid
956 * link unless the FORCE argument is non-zero.
958 static int tg3_phy_reset(struct tg3
*tp
)
963 err
= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
964 err
|= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
968 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
969 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
970 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
971 err
= tg3_phy_reset_5703_4_5(tp
);
977 err
= tg3_bmcr_reset(tp
);
982 if (tp
->tg3_flags2
& TG3_FLG2_PHY_ADC_BUG
) {
983 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
984 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
985 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x2aaa);
986 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
987 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0323);
988 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
990 if (tp
->tg3_flags2
& TG3_FLG2_PHY_5704_A0_BUG
) {
991 tg3_writephy(tp
, 0x1c, 0x8d68);
992 tg3_writephy(tp
, 0x1c, 0x8d68);
994 if (tp
->tg3_flags2
& TG3_FLG2_PHY_BER_BUG
) {
995 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
996 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
997 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x310b);
998 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
999 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x9506);
1000 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x401f);
1001 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x14e2);
1002 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1004 /* Set Extended packet length bit (bit 14) on all chips that */
1005 /* support jumbo frames */
1006 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
1007 /* Cannot do read-modify-write on 5401 */
1008 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
1009 } else if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1012 /* Set bit 14 with read-modify-write to preserve other bits */
1013 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0007) &&
1014 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &phy_reg
))
1015 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy_reg
| 0x4000);
1018 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1019 * jumbo frames transmission.
1021 if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1024 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &phy_reg
))
1025 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1026 phy_reg
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
1029 tg3_phy_set_wirespeed(tp
);
1033 static void tg3_frob_aux_power(struct tg3
*tp
)
1035 struct tg3
*tp_peer
= tp
;
1037 if ((tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) != 0)
1040 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
1041 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
1042 struct net_device
*dev_peer
;
1044 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
1047 tp_peer
= netdev_priv(dev_peer
);
1050 if ((tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1051 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0 ||
1052 (tp_peer
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1053 (tp_peer
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
1054 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1055 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1056 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1057 (GRC_LCLCTRL_GPIO_OE0
|
1058 GRC_LCLCTRL_GPIO_OE1
|
1059 GRC_LCLCTRL_GPIO_OE2
|
1060 GRC_LCLCTRL_GPIO_OUTPUT0
|
1061 GRC_LCLCTRL_GPIO_OUTPUT1
),
1065 u32 grc_local_ctrl
= 0;
1067 if (tp_peer
!= tp
&&
1068 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1071 /* Workaround to prevent overdrawing Amps. */
1072 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
1074 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
1075 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1076 grc_local_ctrl
, 100);
1079 /* On 5753 and variants, GPIO2 cannot be used. */
1080 no_gpio2
= tp
->nic_sram_data_cfg
&
1081 NIC_SRAM_DATA_CFG_NO_GPIO2
;
1083 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
1084 GRC_LCLCTRL_GPIO_OE1
|
1085 GRC_LCLCTRL_GPIO_OE2
|
1086 GRC_LCLCTRL_GPIO_OUTPUT1
|
1087 GRC_LCLCTRL_GPIO_OUTPUT2
;
1089 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
1090 GRC_LCLCTRL_GPIO_OUTPUT2
);
1092 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1093 grc_local_ctrl
, 100);
1095 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
1097 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1098 grc_local_ctrl
, 100);
1101 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
1102 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1103 grc_local_ctrl
, 100);
1107 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
1108 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
1109 if (tp_peer
!= tp
&&
1110 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1113 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1114 (GRC_LCLCTRL_GPIO_OE1
|
1115 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1117 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1118 GRC_LCLCTRL_GPIO_OE1
, 100);
1120 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1121 (GRC_LCLCTRL_GPIO_OE1
|
1122 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1127 static int tg3_setup_phy(struct tg3
*, int);
1129 #define RESET_KIND_SHUTDOWN 0
1130 #define RESET_KIND_INIT 1
1131 #define RESET_KIND_SUSPEND 2
1133 static void tg3_write_sig_post_reset(struct tg3
*, int);
1134 static int tg3_halt_cpu(struct tg3
*, u32
);
1135 static int tg3_nvram_lock(struct tg3
*);
1136 static void tg3_nvram_unlock(struct tg3
*);
1138 static int tg3_set_power_state(struct tg3
*tp
, int state
)
1141 u16 power_control
, power_caps
;
1142 int pm
= tp
->pm_cap
;
1144 /* Make sure register accesses (indirect or otherwise)
1145 * will function correctly.
1147 pci_write_config_dword(tp
->pdev
,
1148 TG3PCI_MISC_HOST_CTRL
,
1149 tp
->misc_host_ctrl
);
1151 pci_read_config_word(tp
->pdev
,
1154 power_control
|= PCI_PM_CTRL_PME_STATUS
;
1155 power_control
&= ~(PCI_PM_CTRL_STATE_MASK
);
1159 pci_write_config_word(tp
->pdev
,
1162 udelay(100); /* Delay after power state change */
1164 /* Switch out of Vaux if it is not a LOM */
1165 if (!(tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
1166 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
, 100);
1183 printk(KERN_WARNING PFX
"%s: Invalid power state (%d) "
1185 tp
->dev
->name
, state
);
1189 power_control
|= PCI_PM_CTRL_PME_ENABLE
;
1191 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
1192 tw32(TG3PCI_MISC_HOST_CTRL
,
1193 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
1195 if (tp
->link_config
.phy_is_low_power
== 0) {
1196 tp
->link_config
.phy_is_low_power
= 1;
1197 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
1198 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
1199 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
1202 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)) {
1203 tp
->link_config
.speed
= SPEED_10
;
1204 tp
->link_config
.duplex
= DUPLEX_HALF
;
1205 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
1206 tg3_setup_phy(tp
, 0);
1209 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1213 for (i
= 0; i
< 200; i
++) {
1214 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
1215 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1220 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
1221 WOL_DRV_STATE_SHUTDOWN
|
1222 WOL_DRV_WOL
| WOL_SET_MAGIC_PKT
);
1224 pci_read_config_word(tp
->pdev
, pm
+ PCI_PM_PMC
, &power_caps
);
1226 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) {
1229 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
1230 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x5a);
1233 mac_mode
= MAC_MODE_PORT_MODE_MII
;
1235 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
||
1236 !(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
))
1237 mac_mode
|= MAC_MODE_LINK_POLARITY
;
1239 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
1242 if (!(tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
1243 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
1245 if (((power_caps
& PCI_PM_CAP_PME_D3cold
) &&
1246 (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)))
1247 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
1249 tw32_f(MAC_MODE
, mac_mode
);
1252 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
1256 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
) &&
1257 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1258 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
1261 base_val
= tp
->pci_clock_ctrl
;
1262 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
1263 CLOCK_CTRL_TXCLK_DISABLE
);
1265 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
1266 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
1267 } else if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
1269 } else if (!((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
1270 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))) {
1271 u32 newbits1
, newbits2
;
1273 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1274 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1275 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
1276 CLOCK_CTRL_TXCLK_DISABLE
|
1278 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1279 } else if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
1280 newbits1
= CLOCK_CTRL_625_CORE
;
1281 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
1283 newbits1
= CLOCK_CTRL_ALTCLK
;
1284 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1287 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
1290 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
1293 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
1296 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1297 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1298 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
1299 CLOCK_CTRL_TXCLK_DISABLE
|
1300 CLOCK_CTRL_44MHZ_CORE
);
1302 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
1305 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1306 tp
->pci_clock_ctrl
| newbits3
, 40);
1310 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) &&
1311 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1312 /* Turn off the PHY */
1313 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
1314 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1315 MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
1316 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x01b2);
1317 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
)
1318 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
1322 tg3_frob_aux_power(tp
);
1324 /* Workaround for unstable PLL clock */
1325 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
1326 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
1327 u32 val
= tr32(0x7d00);
1329 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1331 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1334 err
= tg3_nvram_lock(tp
);
1335 tg3_halt_cpu(tp
, RX_CPU_BASE
);
1337 tg3_nvram_unlock(tp
);
1341 /* Finally, set the new power state. */
1342 pci_write_config_word(tp
->pdev
, pm
+ PCI_PM_CTRL
, power_control
);
1343 udelay(100); /* Delay after power state change */
1345 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
1350 static void tg3_link_report(struct tg3
*tp
)
1352 if (!netif_carrier_ok(tp
->dev
)) {
1353 printk(KERN_INFO PFX
"%s: Link is down.\n", tp
->dev
->name
);
1355 printk(KERN_INFO PFX
"%s: Link is up at %d Mbps, %s duplex.\n",
1357 (tp
->link_config
.active_speed
== SPEED_1000
?
1359 (tp
->link_config
.active_speed
== SPEED_100
?
1361 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1364 printk(KERN_INFO PFX
"%s: Flow control is %s for TX and "
1367 (tp
->tg3_flags
& TG3_FLAG_TX_PAUSE
) ? "on" : "off",
1368 (tp
->tg3_flags
& TG3_FLAG_RX_PAUSE
) ? "on" : "off");
1372 static void tg3_setup_flow_control(struct tg3
*tp
, u32 local_adv
, u32 remote_adv
)
1374 u32 new_tg3_flags
= 0;
1375 u32 old_rx_mode
= tp
->rx_mode
;
1376 u32 old_tx_mode
= tp
->tx_mode
;
1378 if (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) {
1380 /* Convert 1000BaseX flow control bits to 1000BaseT
1381 * bits before resolving flow control.
1383 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
1384 local_adv
&= ~(ADVERTISE_PAUSE_CAP
|
1385 ADVERTISE_PAUSE_ASYM
);
1386 remote_adv
&= ~(LPA_PAUSE_CAP
| LPA_PAUSE_ASYM
);
1388 if (local_adv
& ADVERTISE_1000XPAUSE
)
1389 local_adv
|= ADVERTISE_PAUSE_CAP
;
1390 if (local_adv
& ADVERTISE_1000XPSE_ASYM
)
1391 local_adv
|= ADVERTISE_PAUSE_ASYM
;
1392 if (remote_adv
& LPA_1000XPAUSE
)
1393 remote_adv
|= LPA_PAUSE_CAP
;
1394 if (remote_adv
& LPA_1000XPAUSE_ASYM
)
1395 remote_adv
|= LPA_PAUSE_ASYM
;
1398 if (local_adv
& ADVERTISE_PAUSE_CAP
) {
1399 if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1400 if (remote_adv
& LPA_PAUSE_CAP
)
1402 (TG3_FLAG_RX_PAUSE
|
1404 else if (remote_adv
& LPA_PAUSE_ASYM
)
1406 (TG3_FLAG_RX_PAUSE
);
1408 if (remote_adv
& LPA_PAUSE_CAP
)
1410 (TG3_FLAG_RX_PAUSE
|
1413 } else if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1414 if ((remote_adv
& LPA_PAUSE_CAP
) &&
1415 (remote_adv
& LPA_PAUSE_ASYM
))
1416 new_tg3_flags
|= TG3_FLAG_TX_PAUSE
;
1419 tp
->tg3_flags
&= ~(TG3_FLAG_RX_PAUSE
| TG3_FLAG_TX_PAUSE
);
1420 tp
->tg3_flags
|= new_tg3_flags
;
1422 new_tg3_flags
= tp
->tg3_flags
;
1425 if (new_tg3_flags
& TG3_FLAG_RX_PAUSE
)
1426 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1428 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1430 if (old_rx_mode
!= tp
->rx_mode
) {
1431 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1434 if (new_tg3_flags
& TG3_FLAG_TX_PAUSE
)
1435 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1437 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1439 if (old_tx_mode
!= tp
->tx_mode
) {
1440 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1444 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
1446 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
1447 case MII_TG3_AUX_STAT_10HALF
:
1449 *duplex
= DUPLEX_HALF
;
1452 case MII_TG3_AUX_STAT_10FULL
:
1454 *duplex
= DUPLEX_FULL
;
1457 case MII_TG3_AUX_STAT_100HALF
:
1459 *duplex
= DUPLEX_HALF
;
1462 case MII_TG3_AUX_STAT_100FULL
:
1464 *duplex
= DUPLEX_FULL
;
1467 case MII_TG3_AUX_STAT_1000HALF
:
1468 *speed
= SPEED_1000
;
1469 *duplex
= DUPLEX_HALF
;
1472 case MII_TG3_AUX_STAT_1000FULL
:
1473 *speed
= SPEED_1000
;
1474 *duplex
= DUPLEX_FULL
;
1478 *speed
= SPEED_INVALID
;
1479 *duplex
= DUPLEX_INVALID
;
1484 static void tg3_phy_copper_begin(struct tg3
*tp
)
1489 if (tp
->link_config
.phy_is_low_power
) {
1490 /* Entering low power mode. Disable gigabit and
1491 * 100baseT advertisements.
1493 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1495 new_adv
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1496 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1497 if (tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
)
1498 new_adv
|= (ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1500 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1501 } else if (tp
->link_config
.speed
== SPEED_INVALID
) {
1502 tp
->link_config
.advertising
=
1503 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
1504 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
1505 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
1506 ADVERTISED_Autoneg
| ADVERTISED_MII
);
1508 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
1509 tp
->link_config
.advertising
&=
1510 ~(ADVERTISED_1000baseT_Half
|
1511 ADVERTISED_1000baseT_Full
);
1513 new_adv
= (ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1514 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Half
)
1515 new_adv
|= ADVERTISE_10HALF
;
1516 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Full
)
1517 new_adv
|= ADVERTISE_10FULL
;
1518 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Half
)
1519 new_adv
|= ADVERTISE_100HALF
;
1520 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Full
)
1521 new_adv
|= ADVERTISE_100FULL
;
1522 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1524 if (tp
->link_config
.advertising
&
1525 (ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
)) {
1527 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
1528 new_adv
|= MII_TG3_CTRL_ADV_1000_HALF
;
1529 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
1530 new_adv
|= MII_TG3_CTRL_ADV_1000_FULL
;
1531 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) &&
1532 (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1533 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
))
1534 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1535 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1536 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1538 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1541 /* Asking for a specific link mode. */
1542 if (tp
->link_config
.speed
== SPEED_1000
) {
1543 new_adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1544 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1546 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1547 new_adv
= MII_TG3_CTRL_ADV_1000_FULL
;
1549 new_adv
= MII_TG3_CTRL_ADV_1000_HALF
;
1550 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1551 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
1552 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1553 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1554 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1556 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1558 new_adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1559 if (tp
->link_config
.speed
== SPEED_100
) {
1560 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1561 new_adv
|= ADVERTISE_100FULL
;
1563 new_adv
|= ADVERTISE_100HALF
;
1565 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1566 new_adv
|= ADVERTISE_10FULL
;
1568 new_adv
|= ADVERTISE_10HALF
;
1570 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1574 if (tp
->link_config
.autoneg
== AUTONEG_DISABLE
&&
1575 tp
->link_config
.speed
!= SPEED_INVALID
) {
1576 u32 bmcr
, orig_bmcr
;
1578 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
1579 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
1582 switch (tp
->link_config
.speed
) {
1588 bmcr
|= BMCR_SPEED100
;
1592 bmcr
|= TG3_BMCR_SPEED1000
;
1596 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1597 bmcr
|= BMCR_FULLDPLX
;
1599 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
1600 (bmcr
!= orig_bmcr
)) {
1601 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
1602 for (i
= 0; i
< 1500; i
++) {
1606 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
1607 tg3_readphy(tp
, MII_BMSR
, &tmp
))
1609 if (!(tmp
& BMSR_LSTATUS
)) {
1614 tg3_writephy(tp
, MII_BMCR
, bmcr
);
1618 tg3_writephy(tp
, MII_BMCR
,
1619 BMCR_ANENABLE
| BMCR_ANRESTART
);
1623 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
1627 /* Turn off tap power management. */
1628 /* Set Extended packet length bit */
1629 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
1631 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0012);
1632 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1804);
1634 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0013);
1635 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1204);
1637 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
1638 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0132);
1640 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
1641 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0232);
1643 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
1644 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0a20);
1651 static int tg3_copper_is_advertising_all(struct tg3
*tp
)
1653 u32 adv_reg
, all_mask
;
1655 if (tg3_readphy(tp
, MII_ADVERTISE
, &adv_reg
))
1658 all_mask
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1659 ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1660 if ((adv_reg
& all_mask
) != all_mask
)
1662 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
1665 if (tg3_readphy(tp
, MII_TG3_CTRL
, &tg3_ctrl
))
1668 all_mask
= (MII_TG3_CTRL_ADV_1000_HALF
|
1669 MII_TG3_CTRL_ADV_1000_FULL
);
1670 if ((tg3_ctrl
& all_mask
) != all_mask
)
1676 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
1678 int current_link_up
;
1687 (MAC_STATUS_SYNC_CHANGED
|
1688 MAC_STATUS_CFG_CHANGED
|
1689 MAC_STATUS_MI_COMPLETION
|
1690 MAC_STATUS_LNKSTATE_CHANGED
));
1693 tp
->mi_mode
= MAC_MI_MODE_BASE
;
1694 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1697 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x02);
1699 /* Some third-party PHYs need to be reset on link going
1702 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
1703 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
1704 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
1705 netif_carrier_ok(tp
->dev
)) {
1706 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1707 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1708 !(bmsr
& BMSR_LSTATUS
))
1714 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
1715 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1716 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
1717 !(tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
))
1720 if (!(bmsr
& BMSR_LSTATUS
)) {
1721 err
= tg3_init_5401phy_dsp(tp
);
1725 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1726 for (i
= 0; i
< 1000; i
++) {
1728 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1729 (bmsr
& BMSR_LSTATUS
)) {
1735 if ((tp
->phy_id
& PHY_ID_REV_MASK
) == PHY_REV_BCM5401_B0
&&
1736 !(bmsr
& BMSR_LSTATUS
) &&
1737 tp
->link_config
.active_speed
== SPEED_1000
) {
1738 err
= tg3_phy_reset(tp
);
1740 err
= tg3_init_5401phy_dsp(tp
);
1745 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1746 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
1747 /* 5701 {A0,B0} CRC bug workaround */
1748 tg3_writephy(tp
, 0x15, 0x0a75);
1749 tg3_writephy(tp
, 0x1c, 0x8c68);
1750 tg3_writephy(tp
, 0x1c, 0x8d68);
1751 tg3_writephy(tp
, 0x1c, 0x8c68);
1754 /* Clear pending interrupts... */
1755 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
1756 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
1758 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
)
1759 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
1761 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
1763 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1764 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1765 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
1766 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1767 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
1769 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
1772 current_link_up
= 0;
1773 current_speed
= SPEED_INVALID
;
1774 current_duplex
= DUPLEX_INVALID
;
1776 if (tp
->tg3_flags2
& TG3_FLG2_CAPACITIVE_COUPLING
) {
1779 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4007);
1780 tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
);
1781 if (!(val
& (1 << 10))) {
1783 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
1789 for (i
= 0; i
< 100; i
++) {
1790 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1791 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1792 (bmsr
& BMSR_LSTATUS
))
1797 if (bmsr
& BMSR_LSTATUS
) {
1800 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
1801 for (i
= 0; i
< 2000; i
++) {
1803 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
1808 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
1813 for (i
= 0; i
< 200; i
++) {
1814 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
1815 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
1817 if (bmcr
&& bmcr
!= 0x7fff)
1822 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
1823 if (bmcr
& BMCR_ANENABLE
) {
1824 current_link_up
= 1;
1826 /* Force autoneg restart if we are exiting
1829 if (!tg3_copper_is_advertising_all(tp
))
1830 current_link_up
= 0;
1832 current_link_up
= 0;
1835 if (!(bmcr
& BMCR_ANENABLE
) &&
1836 tp
->link_config
.speed
== current_speed
&&
1837 tp
->link_config
.duplex
== current_duplex
) {
1838 current_link_up
= 1;
1840 current_link_up
= 0;
1844 tp
->link_config
.active_speed
= current_speed
;
1845 tp
->link_config
.active_duplex
= current_duplex
;
1848 if (current_link_up
== 1 &&
1849 (tp
->link_config
.active_duplex
== DUPLEX_FULL
) &&
1850 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
1851 u32 local_adv
, remote_adv
;
1853 if (tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
))
1855 local_adv
&= (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
);
1857 if (tg3_readphy(tp
, MII_LPA
, &remote_adv
))
1860 remote_adv
&= (LPA_PAUSE_CAP
| LPA_PAUSE_ASYM
);
1862 /* If we are not advertising full pause capability,
1863 * something is wrong. Bring the link down and reconfigure.
1865 if (local_adv
!= ADVERTISE_PAUSE_CAP
) {
1866 current_link_up
= 0;
1868 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
1872 if (current_link_up
== 0 || tp
->link_config
.phy_is_low_power
) {
1875 tg3_phy_copper_begin(tp
);
1877 tg3_readphy(tp
, MII_BMSR
, &tmp
);
1878 if (!tg3_readphy(tp
, MII_BMSR
, &tmp
) &&
1879 (tmp
& BMSR_LSTATUS
))
1880 current_link_up
= 1;
1883 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
1884 if (current_link_up
== 1) {
1885 if (tp
->link_config
.active_speed
== SPEED_100
||
1886 tp
->link_config
.active_speed
== SPEED_10
)
1887 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1889 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1891 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1893 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
1894 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
1895 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1897 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
1898 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
1899 if ((tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
) ||
1900 (current_link_up
== 1 &&
1901 tp
->link_config
.active_speed
== SPEED_10
))
1902 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
1904 if (current_link_up
== 1)
1905 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
1908 /* ??? Without this setting Netgear GA302T PHY does not
1909 * ??? send/receive packets...
1911 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5411
&&
1912 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
1913 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
1914 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1918 tw32_f(MAC_MODE
, tp
->mac_mode
);
1921 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
1922 /* Polled via timer. */
1923 tw32_f(MAC_EVENT
, 0);
1925 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
1929 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
1930 current_link_up
== 1 &&
1931 tp
->link_config
.active_speed
== SPEED_1000
&&
1932 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) ||
1933 (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
))) {
1936 (MAC_STATUS_SYNC_CHANGED
|
1937 MAC_STATUS_CFG_CHANGED
));
1940 NIC_SRAM_FIRMWARE_MBOX
,
1941 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
1944 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
1945 if (current_link_up
)
1946 netif_carrier_on(tp
->dev
);
1948 netif_carrier_off(tp
->dev
);
1949 tg3_link_report(tp
);
1955 struct tg3_fiber_aneginfo
{
1957 #define ANEG_STATE_UNKNOWN 0
1958 #define ANEG_STATE_AN_ENABLE 1
1959 #define ANEG_STATE_RESTART_INIT 2
1960 #define ANEG_STATE_RESTART 3
1961 #define ANEG_STATE_DISABLE_LINK_OK 4
1962 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1963 #define ANEG_STATE_ABILITY_DETECT 6
1964 #define ANEG_STATE_ACK_DETECT_INIT 7
1965 #define ANEG_STATE_ACK_DETECT 8
1966 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1967 #define ANEG_STATE_COMPLETE_ACK 10
1968 #define ANEG_STATE_IDLE_DETECT_INIT 11
1969 #define ANEG_STATE_IDLE_DETECT 12
1970 #define ANEG_STATE_LINK_OK 13
1971 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1972 #define ANEG_STATE_NEXT_PAGE_WAIT 15
1975 #define MR_AN_ENABLE 0x00000001
1976 #define MR_RESTART_AN 0x00000002
1977 #define MR_AN_COMPLETE 0x00000004
1978 #define MR_PAGE_RX 0x00000008
1979 #define MR_NP_LOADED 0x00000010
1980 #define MR_TOGGLE_TX 0x00000020
1981 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1982 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1983 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1984 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1985 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1986 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1987 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1988 #define MR_TOGGLE_RX 0x00002000
1989 #define MR_NP_RX 0x00004000
1991 #define MR_LINK_OK 0x80000000
1993 unsigned long link_time
, cur_time
;
1995 u32 ability_match_cfg
;
1996 int ability_match_count
;
1998 char ability_match
, idle_match
, ack_match
;
2000 u32 txconfig
, rxconfig
;
2001 #define ANEG_CFG_NP 0x00000080
2002 #define ANEG_CFG_ACK 0x00000040
2003 #define ANEG_CFG_RF2 0x00000020
2004 #define ANEG_CFG_RF1 0x00000010
2005 #define ANEG_CFG_PS2 0x00000001
2006 #define ANEG_CFG_PS1 0x00008000
2007 #define ANEG_CFG_HD 0x00004000
2008 #define ANEG_CFG_FD 0x00002000
2009 #define ANEG_CFG_INVAL 0x00001f06
2014 #define ANEG_TIMER_ENAB 2
2015 #define ANEG_FAILED -1
2017 #define ANEG_STATE_SETTLE_TIME 10000
2019 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
2020 struct tg3_fiber_aneginfo
*ap
)
2022 unsigned long delta
;
2026 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
2030 ap
->ability_match_cfg
= 0;
2031 ap
->ability_match_count
= 0;
2032 ap
->ability_match
= 0;
2038 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
2039 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
2041 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
2042 ap
->ability_match_cfg
= rx_cfg_reg
;
2043 ap
->ability_match
= 0;
2044 ap
->ability_match_count
= 0;
2046 if (++ap
->ability_match_count
> 1) {
2047 ap
->ability_match
= 1;
2048 ap
->ability_match_cfg
= rx_cfg_reg
;
2051 if (rx_cfg_reg
& ANEG_CFG_ACK
)
2059 ap
->ability_match_cfg
= 0;
2060 ap
->ability_match_count
= 0;
2061 ap
->ability_match
= 0;
2067 ap
->rxconfig
= rx_cfg_reg
;
2071 case ANEG_STATE_UNKNOWN
:
2072 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
2073 ap
->state
= ANEG_STATE_AN_ENABLE
;
2076 case ANEG_STATE_AN_ENABLE
:
2077 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
2078 if (ap
->flags
& MR_AN_ENABLE
) {
2081 ap
->ability_match_cfg
= 0;
2082 ap
->ability_match_count
= 0;
2083 ap
->ability_match
= 0;
2087 ap
->state
= ANEG_STATE_RESTART_INIT
;
2089 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
2093 case ANEG_STATE_RESTART_INIT
:
2094 ap
->link_time
= ap
->cur_time
;
2095 ap
->flags
&= ~(MR_NP_LOADED
);
2097 tw32(MAC_TX_AUTO_NEG
, 0);
2098 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2099 tw32_f(MAC_MODE
, tp
->mac_mode
);
2102 ret
= ANEG_TIMER_ENAB
;
2103 ap
->state
= ANEG_STATE_RESTART
;
2106 case ANEG_STATE_RESTART
:
2107 delta
= ap
->cur_time
- ap
->link_time
;
2108 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2109 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
2111 ret
= ANEG_TIMER_ENAB
;
2115 case ANEG_STATE_DISABLE_LINK_OK
:
2119 case ANEG_STATE_ABILITY_DETECT_INIT
:
2120 ap
->flags
&= ~(MR_TOGGLE_TX
);
2121 ap
->txconfig
= (ANEG_CFG_FD
| ANEG_CFG_PS1
);
2122 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2123 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2124 tw32_f(MAC_MODE
, tp
->mac_mode
);
2127 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
2130 case ANEG_STATE_ABILITY_DETECT
:
2131 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0) {
2132 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
2136 case ANEG_STATE_ACK_DETECT_INIT
:
2137 ap
->txconfig
|= ANEG_CFG_ACK
;
2138 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2139 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2140 tw32_f(MAC_MODE
, tp
->mac_mode
);
2143 ap
->state
= ANEG_STATE_ACK_DETECT
;
2146 case ANEG_STATE_ACK_DETECT
:
2147 if (ap
->ack_match
!= 0) {
2148 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
2149 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
2150 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
2152 ap
->state
= ANEG_STATE_AN_ENABLE
;
2154 } else if (ap
->ability_match
!= 0 &&
2155 ap
->rxconfig
== 0) {
2156 ap
->state
= ANEG_STATE_AN_ENABLE
;
2160 case ANEG_STATE_COMPLETE_ACK_INIT
:
2161 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
2165 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
2166 MR_LP_ADV_HALF_DUPLEX
|
2167 MR_LP_ADV_SYM_PAUSE
|
2168 MR_LP_ADV_ASYM_PAUSE
|
2169 MR_LP_ADV_REMOTE_FAULT1
|
2170 MR_LP_ADV_REMOTE_FAULT2
|
2171 MR_LP_ADV_NEXT_PAGE
|
2174 if (ap
->rxconfig
& ANEG_CFG_FD
)
2175 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
2176 if (ap
->rxconfig
& ANEG_CFG_HD
)
2177 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
2178 if (ap
->rxconfig
& ANEG_CFG_PS1
)
2179 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
2180 if (ap
->rxconfig
& ANEG_CFG_PS2
)
2181 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
2182 if (ap
->rxconfig
& ANEG_CFG_RF1
)
2183 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
2184 if (ap
->rxconfig
& ANEG_CFG_RF2
)
2185 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
2186 if (ap
->rxconfig
& ANEG_CFG_NP
)
2187 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
2189 ap
->link_time
= ap
->cur_time
;
2191 ap
->flags
^= (MR_TOGGLE_TX
);
2192 if (ap
->rxconfig
& 0x0008)
2193 ap
->flags
|= MR_TOGGLE_RX
;
2194 if (ap
->rxconfig
& ANEG_CFG_NP
)
2195 ap
->flags
|= MR_NP_RX
;
2196 ap
->flags
|= MR_PAGE_RX
;
2198 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
2199 ret
= ANEG_TIMER_ENAB
;
2202 case ANEG_STATE_COMPLETE_ACK
:
2203 if (ap
->ability_match
!= 0 &&
2204 ap
->rxconfig
== 0) {
2205 ap
->state
= ANEG_STATE_AN_ENABLE
;
2208 delta
= ap
->cur_time
- ap
->link_time
;
2209 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2210 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
2211 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2213 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
2214 !(ap
->flags
& MR_NP_RX
)) {
2215 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2223 case ANEG_STATE_IDLE_DETECT_INIT
:
2224 ap
->link_time
= ap
->cur_time
;
2225 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2226 tw32_f(MAC_MODE
, tp
->mac_mode
);
2229 ap
->state
= ANEG_STATE_IDLE_DETECT
;
2230 ret
= ANEG_TIMER_ENAB
;
2233 case ANEG_STATE_IDLE_DETECT
:
2234 if (ap
->ability_match
!= 0 &&
2235 ap
->rxconfig
== 0) {
2236 ap
->state
= ANEG_STATE_AN_ENABLE
;
2239 delta
= ap
->cur_time
- ap
->link_time
;
2240 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2241 /* XXX another gem from the Broadcom driver :( */
2242 ap
->state
= ANEG_STATE_LINK_OK
;
2246 case ANEG_STATE_LINK_OK
:
2247 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
2251 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
2252 /* ??? unimplemented */
2255 case ANEG_STATE_NEXT_PAGE_WAIT
:
2256 /* ??? unimplemented */
2267 static int fiber_autoneg(struct tg3
*tp
, u32
*flags
)
2270 struct tg3_fiber_aneginfo aninfo
;
2271 int status
= ANEG_FAILED
;
2275 tw32_f(MAC_TX_AUTO_NEG
, 0);
2277 tmp
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
2278 tw32_f(MAC_MODE
, tmp
| MAC_MODE_PORT_MODE_GMII
);
2281 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
);
2284 memset(&aninfo
, 0, sizeof(aninfo
));
2285 aninfo
.flags
|= MR_AN_ENABLE
;
2286 aninfo
.state
= ANEG_STATE_UNKNOWN
;
2287 aninfo
.cur_time
= 0;
2289 while (++tick
< 195000) {
2290 status
= tg3_fiber_aneg_smachine(tp
, &aninfo
);
2291 if (status
== ANEG_DONE
|| status
== ANEG_FAILED
)
2297 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2298 tw32_f(MAC_MODE
, tp
->mac_mode
);
2301 *flags
= aninfo
.flags
;
2303 if (status
== ANEG_DONE
&&
2304 (aninfo
.flags
& (MR_AN_COMPLETE
| MR_LINK_OK
|
2305 MR_LP_ADV_FULL_DUPLEX
)))
2311 static void tg3_init_bcm8002(struct tg3
*tp
)
2313 u32 mac_status
= tr32(MAC_STATUS
);
2316 /* Reset when initting first time or we have a link. */
2317 if ((tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) &&
2318 !(mac_status
& MAC_STATUS_PCS_SYNCED
))
2321 /* Set PLL lock range. */
2322 tg3_writephy(tp
, 0x16, 0x8007);
2325 tg3_writephy(tp
, MII_BMCR
, BMCR_RESET
);
2327 /* Wait for reset to complete. */
2328 /* XXX schedule_timeout() ... */
2329 for (i
= 0; i
< 500; i
++)
2332 /* Config mode; select PMA/Ch 1 regs. */
2333 tg3_writephy(tp
, 0x10, 0x8411);
2335 /* Enable auto-lock and comdet, select txclk for tx. */
2336 tg3_writephy(tp
, 0x11, 0x0a10);
2338 tg3_writephy(tp
, 0x18, 0x00a0);
2339 tg3_writephy(tp
, 0x16, 0x41ff);
2341 /* Assert and deassert POR. */
2342 tg3_writephy(tp
, 0x13, 0x0400);
2344 tg3_writephy(tp
, 0x13, 0x0000);
2346 tg3_writephy(tp
, 0x11, 0x0a50);
2348 tg3_writephy(tp
, 0x11, 0x0a10);
2350 /* Wait for signal to stabilize */
2351 /* XXX schedule_timeout() ... */
2352 for (i
= 0; i
< 15000; i
++)
2355 /* Deselect the channel register so we can read the PHYID
2358 tg3_writephy(tp
, 0x10, 0x8011);
2361 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
2363 u32 sg_dig_ctrl
, sg_dig_status
;
2364 u32 serdes_cfg
, expected_sg_dig_ctrl
;
2365 int workaround
, port_a
;
2366 int current_link_up
;
2369 expected_sg_dig_ctrl
= 0;
2372 current_link_up
= 0;
2374 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
2375 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
2377 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
2380 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2381 /* preserve bits 20-23 for voltage regulator */
2382 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
2385 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2387 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
2388 if (sg_dig_ctrl
& (1 << 31)) {
2390 u32 val
= serdes_cfg
;
2396 tw32_f(MAC_SERDES_CFG
, val
);
2398 tw32_f(SG_DIG_CTRL
, 0x01388400);
2400 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
2401 tg3_setup_flow_control(tp
, 0, 0);
2402 current_link_up
= 1;
2407 /* Want auto-negotiation. */
2408 expected_sg_dig_ctrl
= 0x81388400;
2410 /* Pause capability */
2411 expected_sg_dig_ctrl
|= (1 << 11);
2413 /* Asymettric pause */
2414 expected_sg_dig_ctrl
|= (1 << 12);
2416 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
2418 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
2419 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| (1 << 30));
2421 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
2423 tp
->tg3_flags2
|= TG3_FLG2_PHY_JUST_INITTED
;
2424 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
2425 MAC_STATUS_SIGNAL_DET
)) {
2428 /* Giver time to negotiate (~200ms) */
2429 for (i
= 0; i
< 40000; i
++) {
2430 sg_dig_status
= tr32(SG_DIG_STATUS
);
2431 if (sg_dig_status
& (0x3))
2435 mac_status
= tr32(MAC_STATUS
);
2437 if ((sg_dig_status
& (1 << 1)) &&
2438 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2439 u32 local_adv
, remote_adv
;
2441 local_adv
= ADVERTISE_PAUSE_CAP
;
2443 if (sg_dig_status
& (1 << 19))
2444 remote_adv
|= LPA_PAUSE_CAP
;
2445 if (sg_dig_status
& (1 << 20))
2446 remote_adv
|= LPA_PAUSE_ASYM
;
2448 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2449 current_link_up
= 1;
2450 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_JUST_INITTED
;
2451 } else if (!(sg_dig_status
& (1 << 1))) {
2452 if (tp
->tg3_flags2
& TG3_FLG2_PHY_JUST_INITTED
)
2453 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_JUST_INITTED
;
2456 u32 val
= serdes_cfg
;
2463 tw32_f(MAC_SERDES_CFG
, val
);
2466 tw32_f(SG_DIG_CTRL
, 0x01388400);
2469 /* Link parallel detection - link is up */
2470 /* only if we have PCS_SYNC and not */
2471 /* receiving config code words */
2472 mac_status
= tr32(MAC_STATUS
);
2473 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2474 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
2475 tg3_setup_flow_control(tp
, 0, 0);
2476 current_link_up
= 1;
2483 return current_link_up
;
2486 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
2488 int current_link_up
= 0;
2490 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2491 tp
->tg3_flags
&= ~TG3_FLAG_GOT_SERDES_FLOWCTL
;
2495 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2499 if (fiber_autoneg(tp
, &flags
)) {
2500 u32 local_adv
, remote_adv
;
2502 local_adv
= ADVERTISE_PAUSE_CAP
;
2504 if (flags
& MR_LP_ADV_SYM_PAUSE
)
2505 remote_adv
|= LPA_PAUSE_CAP
;
2506 if (flags
& MR_LP_ADV_ASYM_PAUSE
)
2507 remote_adv
|= LPA_PAUSE_ASYM
;
2509 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2511 tp
->tg3_flags
|= TG3_FLAG_GOT_SERDES_FLOWCTL
;
2512 current_link_up
= 1;
2514 for (i
= 0; i
< 30; i
++) {
2517 (MAC_STATUS_SYNC_CHANGED
|
2518 MAC_STATUS_CFG_CHANGED
));
2520 if ((tr32(MAC_STATUS
) &
2521 (MAC_STATUS_SYNC_CHANGED
|
2522 MAC_STATUS_CFG_CHANGED
)) == 0)
2526 mac_status
= tr32(MAC_STATUS
);
2527 if (current_link_up
== 0 &&
2528 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2529 !(mac_status
& MAC_STATUS_RCVD_CFG
))
2530 current_link_up
= 1;
2532 /* Forcing 1000FD link up. */
2533 current_link_up
= 1;
2534 tp
->tg3_flags
|= TG3_FLAG_GOT_SERDES_FLOWCTL
;
2536 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
2541 return current_link_up
;
2544 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
2547 u16 orig_active_speed
;
2548 u8 orig_active_duplex
;
2550 int current_link_up
;
2554 (tp
->tg3_flags
& (TG3_FLAG_RX_PAUSE
|
2555 TG3_FLAG_TX_PAUSE
));
2556 orig_active_speed
= tp
->link_config
.active_speed
;
2557 orig_active_duplex
= tp
->link_config
.active_duplex
;
2559 if (!(tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
) &&
2560 netif_carrier_ok(tp
->dev
) &&
2561 (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
)) {
2562 mac_status
= tr32(MAC_STATUS
);
2563 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
2564 MAC_STATUS_SIGNAL_DET
|
2565 MAC_STATUS_CFG_CHANGED
|
2566 MAC_STATUS_RCVD_CFG
);
2567 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
2568 MAC_STATUS_SIGNAL_DET
)) {
2569 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
2570 MAC_STATUS_CFG_CHANGED
));
2575 tw32_f(MAC_TX_AUTO_NEG
, 0);
2577 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
2578 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
2579 tw32_f(MAC_MODE
, tp
->mac_mode
);
2582 if (tp
->phy_id
== PHY_ID_BCM8002
)
2583 tg3_init_bcm8002(tp
);
2585 /* Enable link change event even when serdes polling. */
2586 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2589 current_link_up
= 0;
2590 mac_status
= tr32(MAC_STATUS
);
2592 if (tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
)
2593 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
2595 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
2597 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
2598 tw32_f(MAC_MODE
, tp
->mac_mode
);
2601 tp
->hw_status
->status
=
2602 (SD_STATUS_UPDATED
|
2603 (tp
->hw_status
->status
& ~SD_STATUS_LINK_CHG
));
2605 for (i
= 0; i
< 100; i
++) {
2606 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
2607 MAC_STATUS_CFG_CHANGED
));
2609 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
2610 MAC_STATUS_CFG_CHANGED
)) == 0)
2614 mac_status
= tr32(MAC_STATUS
);
2615 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
2616 current_link_up
= 0;
2617 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2618 tw32_f(MAC_MODE
, (tp
->mac_mode
|
2619 MAC_MODE_SEND_CONFIGS
));
2621 tw32_f(MAC_MODE
, tp
->mac_mode
);
2625 if (current_link_up
== 1) {
2626 tp
->link_config
.active_speed
= SPEED_1000
;
2627 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
2628 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
2629 LED_CTRL_LNKLED_OVERRIDE
|
2630 LED_CTRL_1000MBPS_ON
));
2632 tp
->link_config
.active_speed
= SPEED_INVALID
;
2633 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
2634 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
2635 LED_CTRL_LNKLED_OVERRIDE
|
2636 LED_CTRL_TRAFFIC_OVERRIDE
));
2639 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2640 if (current_link_up
)
2641 netif_carrier_on(tp
->dev
);
2643 netif_carrier_off(tp
->dev
);
2644 tg3_link_report(tp
);
2647 tp
->tg3_flags
& (TG3_FLAG_RX_PAUSE
|
2649 if (orig_pause_cfg
!= now_pause_cfg
||
2650 orig_active_speed
!= tp
->link_config
.active_speed
||
2651 orig_active_duplex
!= tp
->link_config
.active_duplex
)
2652 tg3_link_report(tp
);
2658 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
2660 int current_link_up
, err
= 0;
2665 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2666 tw32_f(MAC_MODE
, tp
->mac_mode
);
2672 (MAC_STATUS_SYNC_CHANGED
|
2673 MAC_STATUS_CFG_CHANGED
|
2674 MAC_STATUS_MI_COMPLETION
|
2675 MAC_STATUS_LNKSTATE_CHANGED
));
2681 current_link_up
= 0;
2682 current_speed
= SPEED_INVALID
;
2683 current_duplex
= DUPLEX_INVALID
;
2685 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2686 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2687 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
2688 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
2689 bmsr
|= BMSR_LSTATUS
;
2691 bmsr
&= ~BMSR_LSTATUS
;
2694 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2696 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
2697 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
2698 /* do nothing, just check for link up at the end */
2699 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2702 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
2703 new_adv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
2704 ADVERTISE_1000XPAUSE
|
2705 ADVERTISE_1000XPSE_ASYM
|
2708 /* Always advertise symmetric PAUSE just like copper */
2709 new_adv
|= ADVERTISE_1000XPAUSE
;
2711 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
2712 new_adv
|= ADVERTISE_1000XHALF
;
2713 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
2714 new_adv
|= ADVERTISE_1000XFULL
;
2716 if ((new_adv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
2717 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2718 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
2719 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2721 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2722 tp
->tg3_flags2
|= TG3_FLG2_PHY_JUST_INITTED
;
2723 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2730 bmcr
&= ~BMCR_SPEED1000
;
2731 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
2733 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
2734 new_bmcr
|= BMCR_FULLDPLX
;
2736 if (new_bmcr
!= bmcr
) {
2737 /* BMCR_SPEED1000 is a reserved bit that needs
2738 * to be set on write.
2740 new_bmcr
|= BMCR_SPEED1000
;
2742 /* Force a linkdown */
2743 if (netif_carrier_ok(tp
->dev
)) {
2746 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
2747 adv
&= ~(ADVERTISE_1000XFULL
|
2748 ADVERTISE_1000XHALF
|
2750 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
2751 tg3_writephy(tp
, MII_BMCR
, bmcr
|
2755 netif_carrier_off(tp
->dev
);
2757 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
2759 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2760 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2761 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2763 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
2764 bmsr
|= BMSR_LSTATUS
;
2766 bmsr
&= ~BMSR_LSTATUS
;
2768 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2772 if (bmsr
& BMSR_LSTATUS
) {
2773 current_speed
= SPEED_1000
;
2774 current_link_up
= 1;
2775 if (bmcr
& BMCR_FULLDPLX
)
2776 current_duplex
= DUPLEX_FULL
;
2778 current_duplex
= DUPLEX_HALF
;
2780 if (bmcr
& BMCR_ANENABLE
) {
2781 u32 local_adv
, remote_adv
, common
;
2783 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
2784 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
2785 common
= local_adv
& remote_adv
;
2786 if (common
& (ADVERTISE_1000XHALF
|
2787 ADVERTISE_1000XFULL
)) {
2788 if (common
& ADVERTISE_1000XFULL
)
2789 current_duplex
= DUPLEX_FULL
;
2791 current_duplex
= DUPLEX_HALF
;
2793 tg3_setup_flow_control(tp
, local_adv
,
2797 current_link_up
= 0;
2801 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
2802 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2803 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
2805 tw32_f(MAC_MODE
, tp
->mac_mode
);
2808 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2810 tp
->link_config
.active_speed
= current_speed
;
2811 tp
->link_config
.active_duplex
= current_duplex
;
2813 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2814 if (current_link_up
)
2815 netif_carrier_on(tp
->dev
);
2817 netif_carrier_off(tp
->dev
);
2818 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2820 tg3_link_report(tp
);
2825 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
2827 if (tp
->tg3_flags2
& TG3_FLG2_PHY_JUST_INITTED
) {
2828 /* Give autoneg time to complete. */
2829 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_JUST_INITTED
;
2832 if (!netif_carrier_ok(tp
->dev
) &&
2833 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
2836 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2837 if (bmcr
& BMCR_ANENABLE
) {
2840 /* Select shadow register 0x1f */
2841 tg3_writephy(tp
, 0x1c, 0x7c00);
2842 tg3_readphy(tp
, 0x1c, &phy1
);
2844 /* Select expansion interrupt status register */
2845 tg3_writephy(tp
, 0x17, 0x0f01);
2846 tg3_readphy(tp
, 0x15, &phy2
);
2847 tg3_readphy(tp
, 0x15, &phy2
);
2849 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
2850 /* We have signal detect and not receiving
2851 * config code words, link is up by parallel
2855 bmcr
&= ~BMCR_ANENABLE
;
2856 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
2857 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2858 tp
->tg3_flags2
|= TG3_FLG2_PARALLEL_DETECT
;
2862 else if (netif_carrier_ok(tp
->dev
) &&
2863 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
2864 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
2867 /* Select expansion interrupt status register */
2868 tg3_writephy(tp
, 0x17, 0x0f01);
2869 tg3_readphy(tp
, 0x15, &phy2
);
2873 /* Config code words received, turn on autoneg. */
2874 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2875 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
2877 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2883 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
2887 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
2888 err
= tg3_setup_fiber_phy(tp
, force_reset
);
2889 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
2890 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
2892 err
= tg3_setup_copper_phy(tp
, force_reset
);
2895 if (tp
->link_config
.active_speed
== SPEED_1000
&&
2896 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2897 tw32(MAC_TX_LENGTHS
,
2898 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2899 (6 << TX_LENGTHS_IPG_SHIFT
) |
2900 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2902 tw32(MAC_TX_LENGTHS
,
2903 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2904 (6 << TX_LENGTHS_IPG_SHIFT
) |
2905 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2907 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
2908 if (netif_carrier_ok(tp
->dev
)) {
2909 tw32(HOSTCC_STAT_COAL_TICKS
,
2910 tp
->coal
.stats_block_coalesce_usecs
);
2912 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
2919 /* Tigon3 never reports partial packet sends. So we do not
2920 * need special logic to handle SKBs that have not had all
2921 * of their frags sent yet, like SunGEM does.
2923 static void tg3_tx(struct tg3
*tp
)
2925 u32 hw_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
2926 u32 sw_idx
= tp
->tx_cons
;
2928 while (sw_idx
!= hw_idx
) {
2929 struct tx_ring_info
*ri
= &tp
->tx_buffers
[sw_idx
];
2930 struct sk_buff
*skb
= ri
->skb
;
2933 if (unlikely(skb
== NULL
))
2936 pci_unmap_single(tp
->pdev
,
2937 pci_unmap_addr(ri
, mapping
),
2943 sw_idx
= NEXT_TX(sw_idx
);
2945 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2946 if (unlikely(sw_idx
== hw_idx
))
2949 ri
= &tp
->tx_buffers
[sw_idx
];
2950 if (unlikely(ri
->skb
!= NULL
))
2953 pci_unmap_page(tp
->pdev
,
2954 pci_unmap_addr(ri
, mapping
),
2955 skb_shinfo(skb
)->frags
[i
].size
,
2958 sw_idx
= NEXT_TX(sw_idx
);
2964 tp
->tx_cons
= sw_idx
;
2966 if (unlikely(netif_queue_stopped(tp
->dev
))) {
2967 spin_lock(&tp
->tx_lock
);
2968 if (netif_queue_stopped(tp
->dev
) &&
2969 (TX_BUFFS_AVAIL(tp
) > TG3_TX_WAKEUP_THRESH
))
2970 netif_wake_queue(tp
->dev
);
2971 spin_unlock(&tp
->tx_lock
);
2975 /* Returns size of skb allocated or < 0 on error.
2977 * We only need to fill in the address because the other members
2978 * of the RX descriptor are invariant, see tg3_init_rings.
2980 * Note the purposeful assymetry of cpu vs. chip accesses. For
2981 * posting buffers we only dirty the first cache line of the RX
2982 * descriptor (containing the address). Whereas for the RX status
2983 * buffers the cpu only reads the last cacheline of the RX descriptor
2984 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2986 static int tg3_alloc_rx_skb(struct tg3
*tp
, u32 opaque_key
,
2987 int src_idx
, u32 dest_idx_unmasked
)
2989 struct tg3_rx_buffer_desc
*desc
;
2990 struct ring_info
*map
, *src_map
;
2991 struct sk_buff
*skb
;
2993 int skb_size
, dest_idx
;
2996 switch (opaque_key
) {
2997 case RXD_OPAQUE_RING_STD
:
2998 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
2999 desc
= &tp
->rx_std
[dest_idx
];
3000 map
= &tp
->rx_std_buffers
[dest_idx
];
3002 src_map
= &tp
->rx_std_buffers
[src_idx
];
3003 skb_size
= tp
->rx_pkt_buf_sz
;
3006 case RXD_OPAQUE_RING_JUMBO
:
3007 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3008 desc
= &tp
->rx_jumbo
[dest_idx
];
3009 map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3011 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3012 skb_size
= RX_JUMBO_PKT_BUF_SZ
;
3019 /* Do not overwrite any of the map or rp information
3020 * until we are sure we can commit to a new buffer.
3022 * Callers depend upon this behavior and assume that
3023 * we leave everything unchanged if we fail.
3025 skb
= dev_alloc_skb(skb_size
);
3030 skb_reserve(skb
, tp
->rx_offset
);
3032 mapping
= pci_map_single(tp
->pdev
, skb
->data
,
3033 skb_size
- tp
->rx_offset
,
3034 PCI_DMA_FROMDEVICE
);
3037 pci_unmap_addr_set(map
, mapping
, mapping
);
3039 if (src_map
!= NULL
)
3040 src_map
->skb
= NULL
;
3042 desc
->addr_hi
= ((u64
)mapping
>> 32);
3043 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
3048 /* We only need to move over in the address because the other
3049 * members of the RX descriptor are invariant. See notes above
3050 * tg3_alloc_rx_skb for full details.
3052 static void tg3_recycle_rx(struct tg3
*tp
, u32 opaque_key
,
3053 int src_idx
, u32 dest_idx_unmasked
)
3055 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
3056 struct ring_info
*src_map
, *dest_map
;
3059 switch (opaque_key
) {
3060 case RXD_OPAQUE_RING_STD
:
3061 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3062 dest_desc
= &tp
->rx_std
[dest_idx
];
3063 dest_map
= &tp
->rx_std_buffers
[dest_idx
];
3064 src_desc
= &tp
->rx_std
[src_idx
];
3065 src_map
= &tp
->rx_std_buffers
[src_idx
];
3068 case RXD_OPAQUE_RING_JUMBO
:
3069 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3070 dest_desc
= &tp
->rx_jumbo
[dest_idx
];
3071 dest_map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3072 src_desc
= &tp
->rx_jumbo
[src_idx
];
3073 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3080 dest_map
->skb
= src_map
->skb
;
3081 pci_unmap_addr_set(dest_map
, mapping
,
3082 pci_unmap_addr(src_map
, mapping
));
3083 dest_desc
->addr_hi
= src_desc
->addr_hi
;
3084 dest_desc
->addr_lo
= src_desc
->addr_lo
;
3086 src_map
->skb
= NULL
;
3089 #if TG3_VLAN_TAG_USED
3090 static int tg3_vlan_rx(struct tg3
*tp
, struct sk_buff
*skb
, u16 vlan_tag
)
3092 return vlan_hwaccel_receive_skb(skb
, tp
->vlgrp
, vlan_tag
);
3096 /* The RX ring scheme is composed of multiple rings which post fresh
3097 * buffers to the chip, and one special ring the chip uses to report
3098 * status back to the host.
3100 * The special ring reports the status of received packets to the
3101 * host. The chip does not write into the original descriptor the
3102 * RX buffer was obtained from. The chip simply takes the original
3103 * descriptor as provided by the host, updates the status and length
3104 * field, then writes this into the next status ring entry.
3106 * Each ring the host uses to post buffers to the chip is described
3107 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3108 * it is first placed into the on-chip ram. When the packet's length
3109 * is known, it walks down the TG3_BDINFO entries to select the ring.
3110 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3111 * which is within the range of the new packet's length is chosen.
3113 * The "separate ring for rx status" scheme may sound queer, but it makes
3114 * sense from a cache coherency perspective. If only the host writes
3115 * to the buffer post rings, and only the chip writes to the rx status
3116 * rings, then cache lines never move beyond shared-modified state.
3117 * If both the host and chip were to write into the same ring, cache line
3118 * eviction could occur since both entities want it in an exclusive state.
3120 static int tg3_rx(struct tg3
*tp
, int budget
)
3123 u32 sw_idx
= tp
->rx_rcb_ptr
;
3127 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3129 * We need to order the read of hw_idx and the read of
3130 * the opaque cookie.
3135 while (sw_idx
!= hw_idx
&& budget
> 0) {
3136 struct tg3_rx_buffer_desc
*desc
= &tp
->rx_rcb
[sw_idx
];
3138 struct sk_buff
*skb
;
3139 dma_addr_t dma_addr
;
3140 u32 opaque_key
, desc_idx
, *post_ptr
;
3142 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
3143 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
3144 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
3145 dma_addr
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
],
3147 skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
3148 post_ptr
= &tp
->rx_std_ptr
;
3149 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
3150 dma_addr
= pci_unmap_addr(&tp
->rx_jumbo_buffers
[desc_idx
],
3152 skb
= tp
->rx_jumbo_buffers
[desc_idx
].skb
;
3153 post_ptr
= &tp
->rx_jumbo_ptr
;
3156 goto next_pkt_nopost
;
3159 work_mask
|= opaque_key
;
3161 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
3162 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
3164 tg3_recycle_rx(tp
, opaque_key
,
3165 desc_idx
, *post_ptr
);
3167 /* Other statistics kept track of by card. */
3168 tp
->net_stats
.rx_dropped
++;
3172 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4; /* omit crc */
3174 if (len
> RX_COPY_THRESHOLD
3175 && tp
->rx_offset
== 2
3176 /* rx_offset != 2 iff this is a 5701 card running
3177 * in PCI-X mode [see tg3_get_invariants()] */
3181 skb_size
= tg3_alloc_rx_skb(tp
, opaque_key
,
3182 desc_idx
, *post_ptr
);
3186 pci_unmap_single(tp
->pdev
, dma_addr
,
3187 skb_size
- tp
->rx_offset
,
3188 PCI_DMA_FROMDEVICE
);
3192 struct sk_buff
*copy_skb
;
3194 tg3_recycle_rx(tp
, opaque_key
,
3195 desc_idx
, *post_ptr
);
3197 copy_skb
= dev_alloc_skb(len
+ 2);
3198 if (copy_skb
== NULL
)
3199 goto drop_it_no_recycle
;
3201 copy_skb
->dev
= tp
->dev
;
3202 skb_reserve(copy_skb
, 2);
3203 skb_put(copy_skb
, len
);
3204 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3205 memcpy(copy_skb
->data
, skb
->data
, len
);
3206 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3208 /* We'll reuse the original ring buffer. */
3212 if ((tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) &&
3213 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
3214 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
3215 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
3216 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3218 skb
->ip_summed
= CHECKSUM_NONE
;
3220 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
3221 #if TG3_VLAN_TAG_USED
3222 if (tp
->vlgrp
!= NULL
&&
3223 desc
->type_flags
& RXD_FLAG_VLAN
) {
3224 tg3_vlan_rx(tp
, skb
,
3225 desc
->err_vlan
& RXD_VLAN_MASK
);
3228 netif_receive_skb(skb
);
3230 tp
->dev
->last_rx
= jiffies
;
3238 sw_idx
%= TG3_RX_RCB_RING_SIZE(tp
);
3240 /* Refresh hw_idx to see if there is new work */
3241 if (sw_idx
== hw_idx
) {
3242 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3247 /* ACK the status ring. */
3248 tp
->rx_rcb_ptr
= sw_idx
;
3249 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, sw_idx
);
3251 /* Refill RX ring(s). */
3252 if (work_mask
& RXD_OPAQUE_RING_STD
) {
3253 sw_idx
= tp
->rx_std_ptr
% TG3_RX_RING_SIZE
;
3254 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3257 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
3258 sw_idx
= tp
->rx_jumbo_ptr
% TG3_RX_JUMBO_RING_SIZE
;
3259 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3267 static int tg3_poll(struct net_device
*netdev
, int *budget
)
3269 struct tg3
*tp
= netdev_priv(netdev
);
3270 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3273 /* handle link change and other phy events */
3274 if (!(tp
->tg3_flags
&
3275 (TG3_FLAG_USE_LINKCHG_REG
|
3276 TG3_FLAG_POLL_SERDES
))) {
3277 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
3278 sblk
->status
= SD_STATUS_UPDATED
|
3279 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
3280 spin_lock(&tp
->lock
);
3281 tg3_setup_phy(tp
, 0);
3282 spin_unlock(&tp
->lock
);
3286 /* run TX completion thread */
3287 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
) {
3291 /* run RX thread, within the bounds set by NAPI.
3292 * All RX "locking" is done by ensuring outside
3293 * code synchronizes with dev->poll()
3295 if (sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
) {
3296 int orig_budget
= *budget
;
3299 if (orig_budget
> netdev
->quota
)
3300 orig_budget
= netdev
->quota
;
3302 work_done
= tg3_rx(tp
, orig_budget
);
3304 *budget
-= work_done
;
3305 netdev
->quota
-= work_done
;
3308 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
3309 tp
->last_tag
= sblk
->status_tag
;
3312 sblk
->status
&= ~SD_STATUS_UPDATED
;
3314 /* if no more work, tell net stack and NIC we're done */
3315 done
= !tg3_has_work(tp
);
3317 netif_rx_complete(netdev
);
3318 tg3_restart_ints(tp
);
3321 return (done
? 0 : 1);
3324 static void tg3_irq_quiesce(struct tg3
*tp
)
3326 BUG_ON(tp
->irq_sync
);
3331 synchronize_irq(tp
->pdev
->irq
);
3334 static inline int tg3_irq_sync(struct tg3
*tp
)
3336 return tp
->irq_sync
;
3339 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3340 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3341 * with as well. Most of the time, this is not necessary except when
3342 * shutting down the device.
3344 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
)
3347 tg3_irq_quiesce(tp
);
3348 spin_lock_bh(&tp
->lock
);
3349 spin_lock(&tp
->tx_lock
);
3352 static inline void tg3_full_unlock(struct tg3
*tp
)
3354 spin_unlock(&tp
->tx_lock
);
3355 spin_unlock_bh(&tp
->lock
);
3358 /* MSI ISR - No need to check for interrupt sharing and no need to
3359 * flush status block and interrupt mailbox. PCI ordering rules
3360 * guarantee that MSI will arrive after the status block.
3362 static irqreturn_t
tg3_msi(int irq
, void *dev_id
, struct pt_regs
*regs
)
3364 struct net_device
*dev
= dev_id
;
3365 struct tg3
*tp
= netdev_priv(dev
);
3367 prefetch(tp
->hw_status
);
3368 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3370 * Writing any value to intr-mbox-0 clears PCI INTA# and
3371 * chip-internal interrupt pending events.
3372 * Writing non-zero to intr-mbox-0 additional tells the
3373 * NIC to stop sending us irqs, engaging "in-intr-handler"
3376 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
3377 if (likely(!tg3_irq_sync(tp
)))
3378 netif_rx_schedule(dev
); /* schedule NAPI poll */
3380 return IRQ_RETVAL(1);
3383 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
3385 struct net_device
*dev
= dev_id
;
3386 struct tg3
*tp
= netdev_priv(dev
);
3387 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3388 unsigned int handled
= 1;
3390 /* In INTx mode, it is possible for the interrupt to arrive at
3391 * the CPU before the status block posted prior to the interrupt.
3392 * Reading the PCI State register will confirm whether the
3393 * interrupt is ours and will flush the status block.
3395 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
3396 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3398 * Writing any value to intr-mbox-0 clears PCI INTA# and
3399 * chip-internal interrupt pending events.
3400 * Writing non-zero to intr-mbox-0 additional tells the
3401 * NIC to stop sending us irqs, engaging "in-intr-handler"
3404 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3406 if (tg3_irq_sync(tp
))
3408 sblk
->status
&= ~SD_STATUS_UPDATED
;
3409 if (likely(tg3_has_work(tp
))) {
3410 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3411 netif_rx_schedule(dev
); /* schedule NAPI poll */
3413 /* No work, shared interrupt perhaps? re-enable
3414 * interrupts, and flush that PCI write
3416 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3419 } else { /* shared interrupt */
3423 return IRQ_RETVAL(handled
);
3426 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
, struct pt_regs
*regs
)
3428 struct net_device
*dev
= dev_id
;
3429 struct tg3
*tp
= netdev_priv(dev
);
3430 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3431 unsigned int handled
= 1;
3433 /* In INTx mode, it is possible for the interrupt to arrive at
3434 * the CPU before the status block posted prior to the interrupt.
3435 * Reading the PCI State register will confirm whether the
3436 * interrupt is ours and will flush the status block.
3438 if ((sblk
->status_tag
!= tp
->last_tag
) ||
3439 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3441 * writing any value to intr-mbox-0 clears PCI INTA# and
3442 * chip-internal interrupt pending events.
3443 * writing non-zero to intr-mbox-0 additional tells the
3444 * NIC to stop sending us irqs, engaging "in-intr-handler"
3447 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3449 if (tg3_irq_sync(tp
))
3451 if (netif_rx_schedule_prep(dev
)) {
3452 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3453 /* Update last_tag to mark that this status has been
3454 * seen. Because interrupt may be shared, we may be
3455 * racing with tg3_poll(), so only update last_tag
3456 * if tg3_poll() is not scheduled.
3458 tp
->last_tag
= sblk
->status_tag
;
3459 __netif_rx_schedule(dev
);
3461 } else { /* shared interrupt */
3465 return IRQ_RETVAL(handled
);
3468 /* ISR for interrupt test */
3469 static irqreturn_t
tg3_test_isr(int irq
, void *dev_id
,
3470 struct pt_regs
*regs
)
3472 struct net_device
*dev
= dev_id
;
3473 struct tg3
*tp
= netdev_priv(dev
);
3474 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3476 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
3477 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3478 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3480 return IRQ_RETVAL(1);
3482 return IRQ_RETVAL(0);
3485 static int tg3_init_hw(struct tg3
*);
3486 static int tg3_halt(struct tg3
*, int, int);
3488 #ifdef CONFIG_NET_POLL_CONTROLLER
3489 static void tg3_poll_controller(struct net_device
*dev
)
3491 struct tg3
*tp
= netdev_priv(dev
);
3493 tg3_interrupt(tp
->pdev
->irq
, dev
, NULL
);
3497 static void tg3_reset_task(void *_data
)
3499 struct tg3
*tp
= _data
;
3500 unsigned int restart_timer
;
3502 tg3_full_lock(tp
, 0);
3503 tp
->tg3_flags
|= TG3_FLAG_IN_RESET_TASK
;
3505 if (!netif_running(tp
->dev
)) {
3506 tp
->tg3_flags
&= ~TG3_FLAG_IN_RESET_TASK
;
3507 tg3_full_unlock(tp
);
3511 tg3_full_unlock(tp
);
3515 tg3_full_lock(tp
, 1);
3517 restart_timer
= tp
->tg3_flags2
& TG3_FLG2_RESTART_TIMER
;
3518 tp
->tg3_flags2
&= ~TG3_FLG2_RESTART_TIMER
;
3520 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
3523 tg3_netif_start(tp
);
3526 mod_timer(&tp
->timer
, jiffies
+ 1);
3528 tp
->tg3_flags
&= ~TG3_FLAG_IN_RESET_TASK
;
3530 tg3_full_unlock(tp
);
3533 static void tg3_tx_timeout(struct net_device
*dev
)
3535 struct tg3
*tp
= netdev_priv(dev
);
3537 printk(KERN_ERR PFX
"%s: transmit timed out, resetting\n",
3540 schedule_work(&tp
->reset_task
);
3543 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3544 static inline int tg3_4g_overflow_test(dma_addr_t mapping
, int len
)
3546 u32 base
= (u32
) mapping
& 0xffffffff;
3548 return ((base
> 0xffffdcc0) &&
3549 (base
+ len
+ 8 < base
));
3552 /* Test for DMA addresses > 40-bit */
3553 static inline int tg3_40bit_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
3556 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3557 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
3558 return (((u64
) mapping
+ len
) > DMA_40BIT_MASK
);
3565 static void tg3_set_txd(struct tg3
*, int, dma_addr_t
, int, u32
, u32
);
3567 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3568 static int tigon3_dma_hwbug_workaround(struct tg3
*tp
, struct sk_buff
*skb
,
3569 u32 last_plus_one
, u32
*start
,
3570 u32 base_flags
, u32 mss
)
3572 struct sk_buff
*new_skb
= skb_copy(skb
, GFP_ATOMIC
);
3573 dma_addr_t new_addr
= 0;
3580 /* New SKB is guaranteed to be linear. */
3582 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
3584 /* Make sure new skb does not cross any 4G boundaries.
3585 * Drop the packet if it does.
3587 if (tg3_4g_overflow_test(new_addr
, new_skb
->len
)) {
3589 dev_kfree_skb(new_skb
);
3592 tg3_set_txd(tp
, entry
, new_addr
, new_skb
->len
,
3593 base_flags
, 1 | (mss
<< 1));
3594 *start
= NEXT_TX(entry
);
3598 /* Now clean up the sw ring entries. */
3600 while (entry
!= last_plus_one
) {
3604 len
= skb_headlen(skb
);
3606 len
= skb_shinfo(skb
)->frags
[i
-1].size
;
3607 pci_unmap_single(tp
->pdev
,
3608 pci_unmap_addr(&tp
->tx_buffers
[entry
], mapping
),
3609 len
, PCI_DMA_TODEVICE
);
3611 tp
->tx_buffers
[entry
].skb
= new_skb
;
3612 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, new_addr
);
3614 tp
->tx_buffers
[entry
].skb
= NULL
;
3616 entry
= NEXT_TX(entry
);
3625 static void tg3_set_txd(struct tg3
*tp
, int entry
,
3626 dma_addr_t mapping
, int len
, u32 flags
,
3629 struct tg3_tx_buffer_desc
*txd
= &tp
->tx_ring
[entry
];
3630 int is_end
= (mss_and_is_end
& 0x1);
3631 u32 mss
= (mss_and_is_end
>> 1);
3635 flags
|= TXD_FLAG_END
;
3636 if (flags
& TXD_FLAG_VLAN
) {
3637 vlan_tag
= flags
>> 16;
3640 vlan_tag
|= (mss
<< TXD_MSS_SHIFT
);
3642 txd
->addr_hi
= ((u64
) mapping
>> 32);
3643 txd
->addr_lo
= ((u64
) mapping
& 0xffffffff);
3644 txd
->len_flags
= (len
<< TXD_LEN_SHIFT
) | flags
;
3645 txd
->vlan_tag
= vlan_tag
<< TXD_VLAN_TAG_SHIFT
;
3648 static int tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3650 struct tg3
*tp
= netdev_priv(dev
);
3652 u32 len
, entry
, base_flags
, mss
;
3653 int would_hit_hwbug
;
3655 len
= skb_headlen(skb
);
3657 /* No BH disabling for tx_lock here. We are running in BH disabled
3658 * context and TX reclaim runs via tp->poll inside of a software
3659 * interrupt. Furthermore, IRQ processing runs lockless so we have
3660 * no IRQ context deadlocks to worry about either. Rejoice!
3662 if (!spin_trylock(&tp
->tx_lock
))
3663 return NETDEV_TX_LOCKED
;
3665 if (unlikely(TX_BUFFS_AVAIL(tp
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
3666 if (!netif_queue_stopped(dev
)) {
3667 netif_stop_queue(dev
);
3669 /* This is a hard error, log it. */
3670 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
3671 "queue awake!\n", dev
->name
);
3673 spin_unlock(&tp
->tx_lock
);
3674 return NETDEV_TX_BUSY
;
3677 entry
= tp
->tx_prod
;
3679 if (skb
->ip_summed
== CHECKSUM_HW
)
3680 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
3681 #if TG3_TSO_SUPPORT != 0
3683 if (skb
->len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
3684 (mss
= skb_shinfo(skb
)->tso_size
) != 0) {
3685 int tcp_opt_len
, ip_tcp_len
;
3687 if (skb_header_cloned(skb
) &&
3688 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
3693 tcp_opt_len
= ((skb
->h
.th
->doff
- 5) * 4);
3694 ip_tcp_len
= (skb
->nh
.iph
->ihl
* 4) + sizeof(struct tcphdr
);
3696 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
3697 TXD_FLAG_CPU_POST_DMA
);
3699 skb
->nh
.iph
->check
= 0;
3700 skb
->nh
.iph
->tot_len
= htons(mss
+ ip_tcp_len
+ tcp_opt_len
);
3701 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
3702 skb
->h
.th
->check
= 0;
3703 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
3707 ~csum_tcpudp_magic(skb
->nh
.iph
->saddr
,
3712 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) ||
3713 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
)) {
3714 if (tcp_opt_len
|| skb
->nh
.iph
->ihl
> 5) {
3717 tsflags
= ((skb
->nh
.iph
->ihl
- 5) +
3718 (tcp_opt_len
>> 2));
3719 mss
|= (tsflags
<< 11);
3722 if (tcp_opt_len
|| skb
->nh
.iph
->ihl
> 5) {
3725 tsflags
= ((skb
->nh
.iph
->ihl
- 5) +
3726 (tcp_opt_len
>> 2));
3727 base_flags
|= tsflags
<< 12;
3734 #if TG3_VLAN_TAG_USED
3735 if (tp
->vlgrp
!= NULL
&& vlan_tx_tag_present(skb
))
3736 base_flags
|= (TXD_FLAG_VLAN
|
3737 (vlan_tx_tag_get(skb
) << 16));
3740 /* Queue skb data, a.k.a. the main skb fragment. */
3741 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
3743 tp
->tx_buffers
[entry
].skb
= skb
;
3744 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
3746 would_hit_hwbug
= 0;
3748 if (tg3_4g_overflow_test(mapping
, len
))
3749 would_hit_hwbug
= 1;
3751 tg3_set_txd(tp
, entry
, mapping
, len
, base_flags
,
3752 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
3754 entry
= NEXT_TX(entry
);
3756 /* Now loop through additional data fragments, and queue them. */
3757 if (skb_shinfo(skb
)->nr_frags
> 0) {
3758 unsigned int i
, last
;
3760 last
= skb_shinfo(skb
)->nr_frags
- 1;
3761 for (i
= 0; i
<= last
; i
++) {
3762 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3765 mapping
= pci_map_page(tp
->pdev
,
3768 len
, PCI_DMA_TODEVICE
);
3770 tp
->tx_buffers
[entry
].skb
= NULL
;
3771 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
3773 if (tg3_4g_overflow_test(mapping
, len
))
3774 would_hit_hwbug
= 1;
3776 if (tg3_40bit_overflow_test(tp
, mapping
, len
))
3777 would_hit_hwbug
= 1;
3779 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
3780 tg3_set_txd(tp
, entry
, mapping
, len
,
3781 base_flags
, (i
== last
)|(mss
<< 1));
3783 tg3_set_txd(tp
, entry
, mapping
, len
,
3784 base_flags
, (i
== last
));
3786 entry
= NEXT_TX(entry
);
3790 if (would_hit_hwbug
) {
3791 u32 last_plus_one
= entry
;
3794 start
= entry
- 1 - skb_shinfo(skb
)->nr_frags
;
3795 start
&= (TG3_TX_RING_SIZE
- 1);
3797 /* If the workaround fails due to memory/mapping
3798 * failure, silently drop this packet.
3800 if (tigon3_dma_hwbug_workaround(tp
, skb
, last_plus_one
,
3801 &start
, base_flags
, mss
))
3807 /* Packets are ready, update Tx producer idx local and on card. */
3808 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
), entry
);
3810 tp
->tx_prod
= entry
;
3811 if (TX_BUFFS_AVAIL(tp
) <= (MAX_SKB_FRAGS
+ 1)) {
3812 netif_stop_queue(dev
);
3813 if (TX_BUFFS_AVAIL(tp
) > TG3_TX_WAKEUP_THRESH
)
3814 netif_wake_queue(tp
->dev
);
3819 spin_unlock(&tp
->tx_lock
);
3821 dev
->trans_start
= jiffies
;
3823 return NETDEV_TX_OK
;
3826 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
3831 if (new_mtu
> ETH_DATA_LEN
) {
3832 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
3833 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
3834 ethtool_op_set_tso(dev
, 0);
3837 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
3839 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
3840 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
3841 tp
->tg3_flags
&= ~TG3_FLAG_JUMBO_RING_ENABLE
;
3845 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
3847 struct tg3
*tp
= netdev_priv(dev
);
3849 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
3852 if (!netif_running(dev
)) {
3853 /* We'll just catch it later when the
3856 tg3_set_mtu(dev
, tp
, new_mtu
);
3862 tg3_full_lock(tp
, 1);
3864 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
3866 tg3_set_mtu(dev
, tp
, new_mtu
);
3870 tg3_netif_start(tp
);
3872 tg3_full_unlock(tp
);
3877 /* Free up pending packets in all rx/tx rings.
3879 * The chip has been shut down and the driver detached from
3880 * the networking, so no interrupts or new tx packets will
3881 * end up in the driver. tp->{tx,}lock is not held and we are not
3882 * in an interrupt context and thus may sleep.
3884 static void tg3_free_rings(struct tg3
*tp
)
3886 struct ring_info
*rxp
;
3889 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
3890 rxp
= &tp
->rx_std_buffers
[i
];
3892 if (rxp
->skb
== NULL
)
3894 pci_unmap_single(tp
->pdev
,
3895 pci_unmap_addr(rxp
, mapping
),
3896 tp
->rx_pkt_buf_sz
- tp
->rx_offset
,
3897 PCI_DMA_FROMDEVICE
);
3898 dev_kfree_skb_any(rxp
->skb
);
3902 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
3903 rxp
= &tp
->rx_jumbo_buffers
[i
];
3905 if (rxp
->skb
== NULL
)
3907 pci_unmap_single(tp
->pdev
,
3908 pci_unmap_addr(rxp
, mapping
),
3909 RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
,
3910 PCI_DMA_FROMDEVICE
);
3911 dev_kfree_skb_any(rxp
->skb
);
3915 for (i
= 0; i
< TG3_TX_RING_SIZE
; ) {
3916 struct tx_ring_info
*txp
;
3917 struct sk_buff
*skb
;
3920 txp
= &tp
->tx_buffers
[i
];
3928 pci_unmap_single(tp
->pdev
,
3929 pci_unmap_addr(txp
, mapping
),
3936 for (j
= 0; j
< skb_shinfo(skb
)->nr_frags
; j
++) {
3937 txp
= &tp
->tx_buffers
[i
& (TG3_TX_RING_SIZE
- 1)];
3938 pci_unmap_page(tp
->pdev
,
3939 pci_unmap_addr(txp
, mapping
),
3940 skb_shinfo(skb
)->frags
[j
].size
,
3945 dev_kfree_skb_any(skb
);
3949 /* Initialize tx/rx rings for packet processing.
3951 * The chip has been shut down and the driver detached from
3952 * the networking, so no interrupts or new tx packets will
3953 * end up in the driver. tp->{tx,}lock are held and thus
3956 static void tg3_init_rings(struct tg3
*tp
)
3960 /* Free up all the SKBs. */
3963 /* Zero out all descriptors. */
3964 memset(tp
->rx_std
, 0, TG3_RX_RING_BYTES
);
3965 memset(tp
->rx_jumbo
, 0, TG3_RX_JUMBO_RING_BYTES
);
3966 memset(tp
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
3967 memset(tp
->tx_ring
, 0, TG3_TX_RING_BYTES
);
3969 tp
->rx_pkt_buf_sz
= RX_PKT_BUF_SZ
;
3970 if ((tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) &&
3971 (tp
->dev
->mtu
> ETH_DATA_LEN
))
3972 tp
->rx_pkt_buf_sz
= RX_JUMBO_PKT_BUF_SZ
;
3974 /* Initialize invariants of the rings, we only set this
3975 * stuff once. This works because the card does not
3976 * write into the rx buffer posting rings.
3978 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
3979 struct tg3_rx_buffer_desc
*rxd
;
3981 rxd
= &tp
->rx_std
[i
];
3982 rxd
->idx_len
= (tp
->rx_pkt_buf_sz
- tp
->rx_offset
- 64)
3984 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
3985 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
3986 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
3989 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
3990 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
3991 struct tg3_rx_buffer_desc
*rxd
;
3993 rxd
= &tp
->rx_jumbo
[i
];
3994 rxd
->idx_len
= (RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
- 64)
3996 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
3998 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
3999 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
4003 /* Now allocate fresh SKBs for each rx ring. */
4004 for (i
= 0; i
< tp
->rx_pending
; i
++) {
4005 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_STD
,
4010 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
4011 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
4012 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_JUMBO
,
4020 * Must not be invoked with interrupt sources disabled and
4021 * the hardware shutdown down.
4023 static void tg3_free_consistent(struct tg3
*tp
)
4025 kfree(tp
->rx_std_buffers
);
4026 tp
->rx_std_buffers
= NULL
;
4028 pci_free_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4029 tp
->rx_std
, tp
->rx_std_mapping
);
4033 pci_free_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4034 tp
->rx_jumbo
, tp
->rx_jumbo_mapping
);
4035 tp
->rx_jumbo
= NULL
;
4038 pci_free_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4039 tp
->rx_rcb
, tp
->rx_rcb_mapping
);
4043 pci_free_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4044 tp
->tx_ring
, tp
->tx_desc_mapping
);
4047 if (tp
->hw_status
) {
4048 pci_free_consistent(tp
->pdev
, TG3_HW_STATUS_SIZE
,
4049 tp
->hw_status
, tp
->status_mapping
);
4050 tp
->hw_status
= NULL
;
4053 pci_free_consistent(tp
->pdev
, sizeof(struct tg3_hw_stats
),
4054 tp
->hw_stats
, tp
->stats_mapping
);
4055 tp
->hw_stats
= NULL
;
4060 * Must not be invoked with interrupt sources disabled and
4061 * the hardware shutdown down. Can sleep.
4063 static int tg3_alloc_consistent(struct tg3
*tp
)
4065 tp
->rx_std_buffers
= kmalloc((sizeof(struct ring_info
) *
4067 TG3_RX_JUMBO_RING_SIZE
)) +
4068 (sizeof(struct tx_ring_info
) *
4071 if (!tp
->rx_std_buffers
)
4074 memset(tp
->rx_std_buffers
, 0,
4075 (sizeof(struct ring_info
) *
4077 TG3_RX_JUMBO_RING_SIZE
)) +
4078 (sizeof(struct tx_ring_info
) *
4081 tp
->rx_jumbo_buffers
= &tp
->rx_std_buffers
[TG3_RX_RING_SIZE
];
4082 tp
->tx_buffers
= (struct tx_ring_info
*)
4083 &tp
->rx_jumbo_buffers
[TG3_RX_JUMBO_RING_SIZE
];
4085 tp
->rx_std
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4086 &tp
->rx_std_mapping
);
4090 tp
->rx_jumbo
= pci_alloc_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4091 &tp
->rx_jumbo_mapping
);
4096 tp
->rx_rcb
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4097 &tp
->rx_rcb_mapping
);
4101 tp
->tx_ring
= pci_alloc_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4102 &tp
->tx_desc_mapping
);
4106 tp
->hw_status
= pci_alloc_consistent(tp
->pdev
,
4108 &tp
->status_mapping
);
4112 tp
->hw_stats
= pci_alloc_consistent(tp
->pdev
,
4113 sizeof(struct tg3_hw_stats
),
4114 &tp
->stats_mapping
);
4118 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4119 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4124 tg3_free_consistent(tp
);
4128 #define MAX_WAIT_CNT 1000
4130 /* To stop a block, clear the enable bit and poll till it
4131 * clears. tp->lock is held.
4133 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
4138 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
4145 /* We can't enable/disable these bits of the
4146 * 5705/5750, just say success.
4159 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4162 if ((val
& enable_bit
) == 0)
4166 if (i
== MAX_WAIT_CNT
&& !silent
) {
4167 printk(KERN_ERR PFX
"tg3_stop_block timed out, "
4168 "ofs=%lx enable_bit=%x\n",
4176 /* tp->lock is held. */
4177 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
4181 tg3_disable_ints(tp
);
4183 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
4184 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
4187 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
4188 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
4189 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
4190 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
4191 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
4192 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
4194 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
4195 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
4196 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
4197 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
4198 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
4199 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
4200 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
4202 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
4203 tw32_f(MAC_MODE
, tp
->mac_mode
);
4206 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
4207 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
4209 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4211 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
4214 if (i
>= MAX_WAIT_CNT
) {
4215 printk(KERN_ERR PFX
"tg3_abort_hw timed out for %s, "
4216 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4217 tp
->dev
->name
, tr32(MAC_TX_MODE
));
4221 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
4222 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
4223 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
4225 tw32(FTQ_RESET
, 0xffffffff);
4226 tw32(FTQ_RESET
, 0x00000000);
4228 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
4229 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
4232 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4234 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4239 /* tp->lock is held. */
4240 static int tg3_nvram_lock(struct tg3
*tp
)
4242 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
4245 if (tp
->nvram_lock_cnt
== 0) {
4246 tw32(NVRAM_SWARB
, SWARB_REQ_SET1
);
4247 for (i
= 0; i
< 8000; i
++) {
4248 if (tr32(NVRAM_SWARB
) & SWARB_GNT1
)
4253 tw32(NVRAM_SWARB
, SWARB_REQ_CLR1
);
4257 tp
->nvram_lock_cnt
++;
4262 /* tp->lock is held. */
4263 static void tg3_nvram_unlock(struct tg3
*tp
)
4265 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
4266 if (tp
->nvram_lock_cnt
> 0)
4267 tp
->nvram_lock_cnt
--;
4268 if (tp
->nvram_lock_cnt
== 0)
4269 tw32_f(NVRAM_SWARB
, SWARB_REQ_CLR1
);
4273 /* tp->lock is held. */
4274 static void tg3_enable_nvram_access(struct tg3
*tp
)
4276 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
4277 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
4278 u32 nvaccess
= tr32(NVRAM_ACCESS
);
4280 tw32(NVRAM_ACCESS
, nvaccess
| ACCESS_ENABLE
);
4284 /* tp->lock is held. */
4285 static void tg3_disable_nvram_access(struct tg3
*tp
)
4287 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
4288 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
4289 u32 nvaccess
= tr32(NVRAM_ACCESS
);
4291 tw32(NVRAM_ACCESS
, nvaccess
& ~ACCESS_ENABLE
);
4295 /* tp->lock is held. */
4296 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
4298 if (!(tp
->tg3_flags2
& TG3_FLG2_SUN_570X
))
4299 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
4300 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
4302 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
4304 case RESET_KIND_INIT
:
4305 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4309 case RESET_KIND_SHUTDOWN
:
4310 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4314 case RESET_KIND_SUSPEND
:
4315 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4325 /* tp->lock is held. */
4326 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
4328 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
4330 case RESET_KIND_INIT
:
4331 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4332 DRV_STATE_START_DONE
);
4335 case RESET_KIND_SHUTDOWN
:
4336 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4337 DRV_STATE_UNLOAD_DONE
);
4346 /* tp->lock is held. */
4347 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
4349 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
4351 case RESET_KIND_INIT
:
4352 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4356 case RESET_KIND_SHUTDOWN
:
4357 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4361 case RESET_KIND_SUSPEND
:
4362 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4372 static void tg3_stop_fw(struct tg3
*);
4374 /* tp->lock is held. */
4375 static int tg3_chip_reset(struct tg3
*tp
)
4378 void (*write_op
)(struct tg3
*, u32
, u32
);
4381 if (!(tp
->tg3_flags2
& TG3_FLG2_SUN_570X
)) {
4383 /* No matching tg3_nvram_unlock() after this because
4384 * chip reset below will undo the nvram lock.
4386 tp
->nvram_lock_cnt
= 0;
4390 * We must avoid the readl() that normally takes place.
4391 * It locks machines, causes machine checks, and other
4392 * fun things. So, temporarily disable the 5701
4393 * hardware workaround, while we do the reset.
4395 write_op
= tp
->write32
;
4396 if (write_op
== tg3_write_flush_reg32
)
4397 tp
->write32
= tg3_write32
;
4400 val
= GRC_MISC_CFG_CORECLK_RESET
;
4402 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
4403 if (tr32(0x7e2c) == 0x60) {
4406 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
4407 tw32(GRC_MISC_CFG
, (1 << 29));
4412 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
4413 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
4414 tw32(GRC_MISC_CFG
, val
);
4416 /* restore 5701 hardware bug workaround write method */
4417 tp
->write32
= write_op
;
4419 /* Unfortunately, we have to delay before the PCI read back.
4420 * Some 575X chips even will not respond to a PCI cfg access
4421 * when the reset command is given to the chip.
4423 * How do these hardware designers expect things to work
4424 * properly if the PCI write is posted for a long period
4425 * of time? It is always necessary to have some method by
4426 * which a register read back can occur to push the write
4427 * out which does the reset.
4429 * For most tg3 variants the trick below was working.
4434 /* Flush PCI posted writes. The normal MMIO registers
4435 * are inaccessible at this time so this is the only
4436 * way to make this reliably (actually, this is no longer
4437 * the case, see above). I tried to use indirect
4438 * register read/write but this upset some 5701 variants.
4440 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
4444 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
4445 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
4449 /* Wait for link training to complete. */
4450 for (i
= 0; i
< 5000; i
++)
4453 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
4454 pci_write_config_dword(tp
->pdev
, 0xc4,
4455 cfg_val
| (1 << 15));
4457 /* Set PCIE max payload size and clear error status. */
4458 pci_write_config_dword(tp
->pdev
, 0xd8, 0xf5000);
4461 /* Re-enable indirect register accesses. */
4462 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
4463 tp
->misc_host_ctrl
);
4465 /* Set MAX PCI retry to zero. */
4466 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
4467 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
4468 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
))
4469 val
|= PCISTATE_RETRY_SAME_DMA
;
4470 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
4472 pci_restore_state(tp
->pdev
);
4474 /* Make sure PCI-X relaxed ordering bit is clear. */
4475 pci_read_config_dword(tp
->pdev
, TG3PCI_X_CAPS
, &val
);
4476 val
&= ~PCIX_CAPS_RELAXED_ORDERING
;
4477 pci_write_config_dword(tp
->pdev
, TG3PCI_X_CAPS
, val
);
4479 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
4482 /* Chip reset on 5780 will reset MSI enable bit,
4483 * so need to restore it.
4485 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
4488 pci_read_config_word(tp
->pdev
,
4489 tp
->msi_cap
+ PCI_MSI_FLAGS
,
4491 pci_write_config_word(tp
->pdev
,
4492 tp
->msi_cap
+ PCI_MSI_FLAGS
,
4493 ctrl
| PCI_MSI_FLAGS_ENABLE
);
4494 val
= tr32(MSGINT_MODE
);
4495 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
4498 val
= tr32(MEMARB_MODE
);
4499 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
4502 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
4504 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
4506 tw32(0x5000, 0x400);
4509 tw32(GRC_MODE
, tp
->grc_mode
);
4511 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
4512 u32 val
= tr32(0xc4);
4514 tw32(0xc4, val
| (1 << 15));
4517 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
4518 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
4519 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
4520 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
4521 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
4522 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
4525 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
4526 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
4527 tw32_f(MAC_MODE
, tp
->mac_mode
);
4528 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
4529 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4530 tw32_f(MAC_MODE
, tp
->mac_mode
);
4532 tw32_f(MAC_MODE
, 0);
4535 if (!(tp
->tg3_flags2
& TG3_FLG2_SUN_570X
)) {
4536 /* Wait for firmware initialization to complete. */
4537 for (i
= 0; i
< 100000; i
++) {
4538 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
4539 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
4544 printk(KERN_ERR PFX
"tg3_reset_hw timed out for %s, "
4545 "firmware will not restart magic=%08x\n",
4546 tp
->dev
->name
, val
);
4551 if ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
4552 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
4553 u32 val
= tr32(0x7c00);
4555 tw32(0x7c00, val
| (1 << 25));
4558 /* Reprobe ASF enable state. */
4559 tp
->tg3_flags
&= ~TG3_FLAG_ENABLE_ASF
;
4560 tp
->tg3_flags2
&= ~TG3_FLG2_ASF_NEW_HANDSHAKE
;
4561 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
4562 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
4565 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
4566 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
4567 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
4568 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
4569 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
4576 /* tp->lock is held. */
4577 static void tg3_stop_fw(struct tg3
*tp
)
4579 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
4583 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
4584 val
= tr32(GRC_RX_CPU_EVENT
);
4586 tw32(GRC_RX_CPU_EVENT
, val
);
4588 /* Wait for RX cpu to ACK the event. */
4589 for (i
= 0; i
< 100; i
++) {
4590 if (!(tr32(GRC_RX_CPU_EVENT
) & (1 << 14)))
/* Full controlled shutdown: pause firmware, signal the reset kind,
 * quiesce the hardware, reset the chip, then write the post-reset
 * signatures.  tp->lock is held.
 * Returns the tg3_chip_reset() result (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
4618 #define TG3_FW_RELEASE_MAJOR 0x0
4619 #define TG3_FW_RELASE_MINOR 0x0
4620 #define TG3_FW_RELEASE_FIX 0x0
4621 #define TG3_FW_START_ADDR 0x08000000
4622 #define TG3_FW_TEXT_ADDR 0x08000000
4623 #define TG3_FW_TEXT_LEN 0x9c0
4624 #define TG3_FW_RODATA_ADDR 0x080009c0
4625 #define TG3_FW_RODATA_LEN 0x60
4626 #define TG3_FW_DATA_ADDR 0x08000a40
4627 #define TG3_FW_DATA_LEN 0x20
4628 #define TG3_FW_SBSS_ADDR 0x08000a60
4629 #define TG3_FW_SBSS_LEN 0xc
4630 #define TG3_FW_BSS_ADDR 0x08000a70
4631 #define TG3_FW_BSS_LEN 0x10
4633 static u32 tg3FwText
[(TG3_FW_TEXT_LEN
/ sizeof(u32
)) + 1] = {
4634 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4635 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4636 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4637 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4638 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4639 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4640 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4641 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4642 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4643 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4644 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4645 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4646 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4647 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4648 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4649 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4650 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4651 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4652 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4653 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4654 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4655 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4656 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4657 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4658 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4660 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4661 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4662 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4663 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4664 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4665 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4666 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4667 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4668 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4669 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4670 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4671 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4672 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4673 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4674 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4675 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4676 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4677 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4678 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4679 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4680 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4681 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4682 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4683 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4684 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4685 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4686 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4687 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4688 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4689 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4690 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4691 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4692 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4693 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4694 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4695 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4696 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4697 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4698 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4699 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4700 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4701 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4702 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4703 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4704 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4705 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4706 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4707 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4708 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4709 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4710 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4711 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4712 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4713 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4714 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4715 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4716 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4717 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4718 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4719 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4720 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4721 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4722 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4723 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4724 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4727 static u32 tg3FwRodata
[(TG3_FW_RODATA_LEN
/ sizeof(u32
)) + 1] = {
4728 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4729 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4730 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4731 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4735 #if 0 /* All zeros, don't eat up space with it. */
4736 u32 tg3FwData
[(TG3_FW_DATA_LEN
/ sizeof(u32
)) + 1] = {
4737 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4738 0x00000000, 0x00000000, 0x00000000, 0x00000000
4742 #define RX_CPU_SCRATCH_BASE 0x30000
4743 #define RX_CPU_SCRATCH_SIZE 0x04000
4744 #define TX_CPU_SCRATCH_BASE 0x34000
4745 #define TX_CPU_SCRATCH_SIZE 0x04000
4747 /* tp->lock is held. */
4748 static int tg3_halt_cpu(struct tg3
*tp
, u32 offset
)
4752 if (offset
== TX_CPU_BASE
&&
4753 (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
4756 if (offset
== RX_CPU_BASE
) {
4757 for (i
= 0; i
< 10000; i
++) {
4758 tw32(offset
+ CPU_STATE
, 0xffffffff);
4759 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
4760 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
4764 tw32(offset
+ CPU_STATE
, 0xffffffff);
4765 tw32_f(offset
+ CPU_MODE
, CPU_MODE_HALT
);
4768 for (i
= 0; i
< 10000; i
++) {
4769 tw32(offset
+ CPU_STATE
, 0xffffffff);
4770 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
4771 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
4777 printk(KERN_ERR PFX
"tg3_reset_cpu timed out for %s, "
4780 (offset
== RX_CPU_BASE
? "RX" : "TX"));
4784 /* Clear firmware's nvram arbitration. */
4785 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
4786 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
4791 unsigned int text_base
;
4792 unsigned int text_len
;
4794 unsigned int rodata_base
;
4795 unsigned int rodata_len
;
4797 unsigned int data_base
;
4798 unsigned int data_len
;
4802 /* tp->lock is held. */
4803 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
, u32 cpu_scratch_base
,
4804 int cpu_scratch_size
, struct fw_info
*info
)
4806 int err
, lock_err
, i
;
4807 void (*write_op
)(struct tg3
*, u32
, u32
);
4809 if (cpu_base
== TX_CPU_BASE
&&
4810 (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
4811 printk(KERN_ERR PFX
"tg3_load_firmware_cpu: Trying to load "
4812 "TX cpu firmware on %s which is 5705.\n",
4817 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
4818 write_op
= tg3_write_mem
;
4820 write_op
= tg3_write_indirect_reg32
;
4822 /* It is possible that bootcode is still loading at this point.
4823 * Get the nvram lock first before halting the cpu.
4825 lock_err
= tg3_nvram_lock(tp
);
4826 err
= tg3_halt_cpu(tp
, cpu_base
);
4828 tg3_nvram_unlock(tp
);
4832 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
4833 write_op(tp
, cpu_scratch_base
+ i
, 0);
4834 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
4835 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
4836 for (i
= 0; i
< (info
->text_len
/ sizeof(u32
)); i
++)
4837 write_op(tp
, (cpu_scratch_base
+
4838 (info
->text_base
& 0xffff) +
4841 info
->text_data
[i
] : 0));
4842 for (i
= 0; i
< (info
->rodata_len
/ sizeof(u32
)); i
++)
4843 write_op(tp
, (cpu_scratch_base
+
4844 (info
->rodata_base
& 0xffff) +
4846 (info
->rodata_data
?
4847 info
->rodata_data
[i
] : 0));
4848 for (i
= 0; i
< (info
->data_len
/ sizeof(u32
)); i
++)
4849 write_op(tp
, (cpu_scratch_base
+
4850 (info
->data_base
& 0xffff) +
4853 info
->data_data
[i
] : 0));
4861 /* tp->lock is held. */
4862 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
4864 struct fw_info info
;
4867 info
.text_base
= TG3_FW_TEXT_ADDR
;
4868 info
.text_len
= TG3_FW_TEXT_LEN
;
4869 info
.text_data
= &tg3FwText
[0];
4870 info
.rodata_base
= TG3_FW_RODATA_ADDR
;
4871 info
.rodata_len
= TG3_FW_RODATA_LEN
;
4872 info
.rodata_data
= &tg3FwRodata
[0];
4873 info
.data_base
= TG3_FW_DATA_ADDR
;
4874 info
.data_len
= TG3_FW_DATA_LEN
;
4875 info
.data_data
= NULL
;
4877 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
4878 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
4883 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
4884 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
4889 /* Now startup only the RX cpu. */
4890 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
4891 tw32_f(RX_CPU_BASE
+ CPU_PC
, TG3_FW_TEXT_ADDR
);
4893 for (i
= 0; i
< 5; i
++) {
4894 if (tr32(RX_CPU_BASE
+ CPU_PC
) == TG3_FW_TEXT_ADDR
)
4896 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
4897 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
4898 tw32_f(RX_CPU_BASE
+ CPU_PC
, TG3_FW_TEXT_ADDR
);
4902 printk(KERN_ERR PFX
"tg3_load_firmware fails for %s "
4903 "to set RX CPU PC, is %08x should be %08x\n",
4904 tp
->dev
->name
, tr32(RX_CPU_BASE
+ CPU_PC
),
4908 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
4909 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
4914 #if TG3_TSO_SUPPORT != 0
4916 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
4917 #define TG3_TSO_FW_RELASE_MINOR 0x6
4918 #define TG3_TSO_FW_RELEASE_FIX 0x0
4919 #define TG3_TSO_FW_START_ADDR 0x08000000
4920 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
4921 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
4922 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
4923 #define TG3_TSO_FW_RODATA_LEN 0x60
4924 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
4925 #define TG3_TSO_FW_DATA_LEN 0x30
4926 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
4927 #define TG3_TSO_FW_SBSS_LEN 0x2c
4928 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
4929 #define TG3_TSO_FW_BSS_LEN 0x894
4931 static u32 tg3TsoFwText
[(TG3_TSO_FW_TEXT_LEN
/ 4) + 1] = {
4932 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4933 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4934 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4935 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4936 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4937 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4938 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4939 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4940 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4941 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4942 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4943 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4944 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4945 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4946 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4947 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4948 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4949 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4950 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4951 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4952 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4953 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4954 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4955 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4956 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4957 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4958 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4959 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4960 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4961 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4962 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4963 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4964 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4965 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4966 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4967 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4968 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4969 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4970 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4971 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4972 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4973 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4974 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4975 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4976 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4977 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4978 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4979 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4980 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4981 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4982 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4983 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4984 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4985 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4986 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4987 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4988 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4989 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4990 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4991 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4992 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4993 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4994 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4995 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4996 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4997 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4998 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4999 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5000 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5001 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5002 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5003 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5004 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5005 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5006 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5007 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5008 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5009 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5010 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5011 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5012 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5013 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5014 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5015 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5016 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5017 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5018 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5019 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5020 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5021 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5022 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5023 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5024 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5025 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5026 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5027 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5028 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5029 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5030 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5031 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5032 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5033 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5034 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5035 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5036 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5037 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5038 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5039 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5040 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5041 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5042 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5043 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5044 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5045 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5046 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5047 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5048 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5049 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5050 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5051 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5052 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5053 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5054 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5055 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5056 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5057 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5058 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5059 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5060 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5061 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5062 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5063 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5064 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5065 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5066 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5067 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5068 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5069 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5070 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5071 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5072 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5073 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5074 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5075 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5076 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5077 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5078 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5079 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5080 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5081 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5082 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5083 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5084 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5085 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5086 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5087 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5088 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5089 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5090 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5091 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5092 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5093 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5094 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5095 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5096 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5097 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5098 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5099 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5100 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5101 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5102 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5103 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5104 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5105 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5106 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5107 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5108 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5109 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5110 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5111 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5112 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5113 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5114 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5115 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5116 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5117 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5118 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5119 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5120 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5121 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5122 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5123 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5124 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5125 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5126 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5127 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5128 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5129 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5130 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5131 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5132 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5133 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5134 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5135 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5136 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5137 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5138 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5139 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5140 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5141 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5142 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5143 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5144 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5145 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5146 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5147 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5148 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5149 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5150 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5151 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5152 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5153 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5154 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5155 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5156 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5157 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5158 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5159 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5160 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5161 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5162 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5163 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5164 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5165 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5166 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5167 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5168 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5169 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5170 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5171 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5172 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5173 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5174 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5175 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5176 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5177 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5178 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5179 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5180 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5181 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5182 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5183 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5184 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5185 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5186 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5187 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5188 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5189 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5190 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5191 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5192 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5193 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5194 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5195 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5196 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5197 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5198 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5199 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5200 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5201 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5202 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5203 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5204 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5205 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5206 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5207 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5208 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5209 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5210 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5211 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5212 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5213 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5214 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5215 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5218 static u32 tg3TsoFwRodata
[] = {
5219 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5220 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5221 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5222 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5226 static u32 tg3TsoFwData
[] = {
5227 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5228 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5232 /* 5705 needs a special version of the TSO firmware. */
5233 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
5234 #define TG3_TSO5_FW_RELASE_MINOR 0x2
5235 #define TG3_TSO5_FW_RELEASE_FIX 0x0
5236 #define TG3_TSO5_FW_START_ADDR 0x00010000
5237 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
5238 #define TG3_TSO5_FW_TEXT_LEN 0xe90
5239 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
5240 #define TG3_TSO5_FW_RODATA_LEN 0x50
5241 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
5242 #define TG3_TSO5_FW_DATA_LEN 0x20
5243 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
5244 #define TG3_TSO5_FW_SBSS_LEN 0x28
5245 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
5246 #define TG3_TSO5_FW_BSS_LEN 0x88
5248 static u32 tg3Tso5FwText
[(TG3_TSO5_FW_TEXT_LEN
/ 4) + 1] = {
5249 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5250 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5251 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5252 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5253 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5254 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5255 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5256 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5257 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5258 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5259 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5260 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5261 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5262 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5263 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5264 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5265 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5266 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5267 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5268 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5269 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5270 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5271 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5272 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5273 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5274 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5275 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5276 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5277 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5278 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5279 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5280 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5281 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5282 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5283 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5284 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5285 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5286 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5287 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5288 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5289 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5290 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5291 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5292 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5293 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5294 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5295 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5296 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5297 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5298 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5299 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5300 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5301 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5302 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5303 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5304 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5305 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5306 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5307 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5308 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5309 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5310 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5311 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5312 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5313 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5314 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5315 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5316 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5317 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5318 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5319 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5320 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5321 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5322 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5323 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5324 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5325 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5326 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5327 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5328 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5329 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5330 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5331 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5332 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5333 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5334 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5335 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5336 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5337 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5338 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5339 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5340 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5341 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5342 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5343 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5344 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5345 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5346 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5347 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5348 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5349 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5350 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5351 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5352 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5353 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5354 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5355 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5356 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5357 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5358 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5359 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5360 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5361 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5362 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5363 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5364 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5365 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5366 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5367 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5368 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5369 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5370 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5371 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5372 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5373 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5374 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5375 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5376 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5377 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5378 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5379 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5380 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5381 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5382 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5383 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5384 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5385 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5386 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5387 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5388 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5389 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5390 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5391 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5392 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5393 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5394 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5395 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5396 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5397 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5398 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5399 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5400 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5401 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5402 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5403 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5404 0x00000000, 0x00000000, 0x00000000,
5407 static u32 tg3Tso5FwRodata
[(TG3_TSO5_FW_RODATA_LEN
/ 4) + 1] = {
5408 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5409 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5410 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5411 0x00000000, 0x00000000, 0x00000000,
5414 static u32 tg3Tso5FwData
[(TG3_TSO5_FW_DATA_LEN
/ 4) + 1] = {
5415 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5416 0x00000000, 0x00000000, 0x00000000,
5419 /* tp->lock is held. */
5420 static int tg3_load_tso_firmware(struct tg3
*tp
)
5422 struct fw_info info
;
5423 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
5426 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
5429 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
5430 info
.text_base
= TG3_TSO5_FW_TEXT_ADDR
;
5431 info
.text_len
= TG3_TSO5_FW_TEXT_LEN
;
5432 info
.text_data
= &tg3Tso5FwText
[0];
5433 info
.rodata_base
= TG3_TSO5_FW_RODATA_ADDR
;
5434 info
.rodata_len
= TG3_TSO5_FW_RODATA_LEN
;
5435 info
.rodata_data
= &tg3Tso5FwRodata
[0];
5436 info
.data_base
= TG3_TSO5_FW_DATA_ADDR
;
5437 info
.data_len
= TG3_TSO5_FW_DATA_LEN
;
5438 info
.data_data
= &tg3Tso5FwData
[0];
5439 cpu_base
= RX_CPU_BASE
;
5440 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
5441 cpu_scratch_size
= (info
.text_len
+
5444 TG3_TSO5_FW_SBSS_LEN
+
5445 TG3_TSO5_FW_BSS_LEN
);
5447 info
.text_base
= TG3_TSO_FW_TEXT_ADDR
;
5448 info
.text_len
= TG3_TSO_FW_TEXT_LEN
;
5449 info
.text_data
= &tg3TsoFwText
[0];
5450 info
.rodata_base
= TG3_TSO_FW_RODATA_ADDR
;
5451 info
.rodata_len
= TG3_TSO_FW_RODATA_LEN
;
5452 info
.rodata_data
= &tg3TsoFwRodata
[0];
5453 info
.data_base
= TG3_TSO_FW_DATA_ADDR
;
5454 info
.data_len
= TG3_TSO_FW_DATA_LEN
;
5455 info
.data_data
= &tg3TsoFwData
[0];
5456 cpu_base
= TX_CPU_BASE
;
5457 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
5458 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
5461 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
5462 cpu_scratch_base
, cpu_scratch_size
,
5467 /* Now startup the cpu. */
5468 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5469 tw32_f(cpu_base
+ CPU_PC
, info
.text_base
);
5471 for (i
= 0; i
< 5; i
++) {
5472 if (tr32(cpu_base
+ CPU_PC
) == info
.text_base
)
5474 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5475 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
5476 tw32_f(cpu_base
+ CPU_PC
, info
.text_base
);
5480 printk(KERN_ERR PFX
"tg3_load_tso_firmware fails for %s "
5481 "to set CPU PC, is %08x should be %08x\n",
5482 tp
->dev
->name
, tr32(cpu_base
+ CPU_PC
),
5486 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5487 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
5491 #endif /* TG3_TSO_SUPPORT != 0 */
5493 /* tp->lock is held. */
5494 static void __tg3_set_mac_addr(struct tg3
*tp
)
5496 u32 addr_high
, addr_low
;
5499 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
5500 tp
->dev
->dev_addr
[1]);
5501 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
5502 (tp
->dev
->dev_addr
[3] << 16) |
5503 (tp
->dev
->dev_addr
[4] << 8) |
5504 (tp
->dev
->dev_addr
[5] << 0));
5505 for (i
= 0; i
< 4; i
++) {
5506 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
5507 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
5510 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
5511 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
5512 for (i
= 0; i
< 12; i
++) {
5513 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
5514 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
5518 addr_high
= (tp
->dev
->dev_addr
[0] +
5519 tp
->dev
->dev_addr
[1] +
5520 tp
->dev
->dev_addr
[2] +
5521 tp
->dev
->dev_addr
[3] +
5522 tp
->dev
->dev_addr
[4] +
5523 tp
->dev
->dev_addr
[5]) &
5524 TX_BACKOFF_SEED_MASK
;
5525 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
5528 static int tg3_set_mac_addr(struct net_device
*dev
, void *p
)
5530 struct tg3
*tp
= netdev_priv(dev
);
5531 struct sockaddr
*addr
= p
;
5533 if (!is_valid_ether_addr(addr
->sa_data
))
5536 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
5538 spin_lock_bh(&tp
->lock
);
5539 __tg3_set_mac_addr(tp
);
5540 spin_unlock_bh(&tp
->lock
);
5545 /* tp->lock is held. */
5546 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
5547 dma_addr_t mapping
, u32 maxlen_flags
,
5551 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
5552 ((u64
) mapping
>> 32));
5554 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
5555 ((u64
) mapping
& 0xffffffff));
5557 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
5560 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
5562 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
5566 static void __tg3_set_rx_mode(struct net_device
*);
5567 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
5569 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
5570 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
5571 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
5572 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
5573 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5574 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
5575 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
5577 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
5578 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
5579 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5580 u32 val
= ec
->stats_block_coalesce_usecs
;
5582 if (!netif_carrier_ok(tp
->dev
))
5585 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
5589 /* tp->lock is held. */
5590 static int tg3_reset_hw(struct tg3
*tp
)
5592 u32 val
, rdmac_mode
;
5595 tg3_disable_ints(tp
);
5599 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
5601 if (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) {
5602 tg3_abort_hw(tp
, 1);
5605 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
5608 err
= tg3_chip_reset(tp
);
5612 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
5614 /* This works around an issue with Athlon chipsets on
5615 * B3 tigon3 silicon. This bit has no effect on any
5616 * other revision. But do not set this on PCI Express
5619 if (!(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
5620 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
5621 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
5623 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
5624 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
5625 val
= tr32(TG3PCI_PCISTATE
);
5626 val
|= PCISTATE_RETRY_SAME_DMA
;
5627 tw32(TG3PCI_PCISTATE
, val
);
5630 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
5631 /* Enable some hw fixes. */
5632 val
= tr32(TG3PCI_MSI_DATA
);
5633 val
|= (1 << 26) | (1 << 28) | (1 << 29);
5634 tw32(TG3PCI_MSI_DATA
, val
);
5637 /* Descriptor ring init may make accesses to the
5638 * NIC SRAM area to setup the TX descriptors, so we
5639 * can only do this after the hardware has been
5640 * successfully reset.
5644 /* This value is determined during the probe time DMA
5645 * engine test, tg3_test_dma.
5647 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
5649 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
5650 GRC_MODE_4X_NIC_SEND_RINGS
|
5651 GRC_MODE_NO_TX_PHDR_CSUM
|
5652 GRC_MODE_NO_RX_PHDR_CSUM
);
5653 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
5654 if (tp
->tg3_flags
& TG3_FLAG_NO_TX_PSEUDO_CSUM
)
5655 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
5656 if (tp
->tg3_flags
& TG3_FLAG_NO_RX_PSEUDO_CSUM
)
5657 tp
->grc_mode
|= GRC_MODE_NO_RX_PHDR_CSUM
;
5661 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
5663 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5664 val
= tr32(GRC_MISC_CFG
);
5666 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
5667 tw32(GRC_MISC_CFG
, val
);
5669 /* Initialize MBUF/DESC pool. */
5670 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
5672 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
5673 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
5674 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
5675 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
5677 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
5678 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
5679 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
5681 #if TG3_TSO_SUPPORT != 0
5682 else if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
5685 fw_len
= (TG3_TSO5_FW_TEXT_LEN
+
5686 TG3_TSO5_FW_RODATA_LEN
+
5687 TG3_TSO5_FW_DATA_LEN
+
5688 TG3_TSO5_FW_SBSS_LEN
+
5689 TG3_TSO5_FW_BSS_LEN
);
5690 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
5691 tw32(BUFMGR_MB_POOL_ADDR
,
5692 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
5693 tw32(BUFMGR_MB_POOL_SIZE
,
5694 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
5698 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
5699 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
5700 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
5701 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
5702 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
5703 tw32(BUFMGR_MB_HIGH_WATER
,
5704 tp
->bufmgr_config
.mbuf_high_water
);
5706 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
5707 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
5708 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
5709 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
5710 tw32(BUFMGR_MB_HIGH_WATER
,
5711 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
5713 tw32(BUFMGR_DMA_LOW_WATER
,
5714 tp
->bufmgr_config
.dma_low_water
);
5715 tw32(BUFMGR_DMA_HIGH_WATER
,
5716 tp
->bufmgr_config
.dma_high_water
);
5718 tw32(BUFMGR_MODE
, BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
);
5719 for (i
= 0; i
< 2000; i
++) {
5720 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
5725 printk(KERN_ERR PFX
"tg3_reset_hw cannot enable BUFMGR for %s.\n",
5730 /* Setup replenish threshold. */
5731 tw32(RCVBDI_STD_THRESH
, tp
->rx_pending
/ 8);
5733 /* Initialize TG3_BDINFO's at:
5734 * RCVDBDI_STD_BD: standard eth size rx ring
5735 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5736 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5739 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5740 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5741 * ring attribute flags
5742 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5744 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5745 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5747 * The size of each ring is fixed in the firmware, but the location is
5750 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
5751 ((u64
) tp
->rx_std_mapping
>> 32));
5752 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
5753 ((u64
) tp
->rx_std_mapping
& 0xffffffff));
5754 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
5755 NIC_SRAM_RX_BUFFER_DESC
);
5757 /* Don't even try to program the JUMBO/MINI buffer descriptor
5760 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
5761 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5762 RX_STD_MAX_SIZE_5705
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
5764 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5765 RX_STD_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
5767 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5768 BDINFO_FLAGS_DISABLED
);
5770 /* Setup replenish threshold. */
5771 tw32(RCVBDI_JUMBO_THRESH
, tp
->rx_jumbo_pending
/ 8);
5773 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
5774 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
5775 ((u64
) tp
->rx_jumbo_mapping
>> 32));
5776 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
5777 ((u64
) tp
->rx_jumbo_mapping
& 0xffffffff));
5778 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5779 RX_JUMBO_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
5780 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
5781 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
5783 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
5784 BDINFO_FLAGS_DISABLED
);
5789 /* There is only one send ring on 5705/5750, no need to explicitly
5790 * disable the others.
5792 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5793 /* Clear out send RCB ring in SRAM. */
5794 for (i
= NIC_SRAM_SEND_RCB
; i
< NIC_SRAM_RCV_RET_RCB
; i
+= TG3_BDINFO_SIZE
)
5795 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
5796 BDINFO_FLAGS_DISABLED
);
5801 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
5802 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
5804 tg3_set_bdinfo(tp
, NIC_SRAM_SEND_RCB
,
5805 tp
->tx_desc_mapping
,
5806 (TG3_TX_RING_SIZE
<<
5807 BDINFO_FLAGS_MAXLEN_SHIFT
),
5808 NIC_SRAM_TX_BUFFER_DESC
);
5810 /* There is only one receive return ring on 5705/5750, no need
5811 * to explicitly disable the others.
5813 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5814 for (i
= NIC_SRAM_RCV_RET_RCB
; i
< NIC_SRAM_STATS_BLK
;
5815 i
+= TG3_BDINFO_SIZE
) {
5816 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
5817 BDINFO_FLAGS_DISABLED
);
5822 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
5824 tg3_set_bdinfo(tp
, NIC_SRAM_RCV_RET_RCB
,
5826 (TG3_RX_RCB_RING_SIZE(tp
) <<
5827 BDINFO_FLAGS_MAXLEN_SHIFT
),
5830 tp
->rx_std_ptr
= tp
->rx_pending
;
5831 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
5834 tp
->rx_jumbo_ptr
= (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) ?
5835 tp
->rx_jumbo_pending
: 0;
5836 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
5839 /* Initialize MAC address and backoff seed. */
5840 __tg3_set_mac_addr(tp
);
5842 /* MTU + ethernet header + FCS + optional VLAN tag */
5843 tw32(MAC_RX_MTU_SIZE
, tp
->dev
->mtu
+ ETH_HLEN
+ 8);
5845 /* The slot time is changed by tg3_setup_phy if we
5846 * run at gigabit with half duplex.
5848 tw32(MAC_TX_LENGTHS
,
5849 (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
5850 (6 << TX_LENGTHS_IPG_SHIFT
) |
5851 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
5853 /* Receive rules. */
5854 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
5855 tw32(RCVLPC_CONFIG
, 0x0181);
5857 /* Calculate RDMAC_MODE setting early, we need it to determine
5858 * the RCVLPC_STATE_ENABLE mask.
5860 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
5861 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
5862 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
5863 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
5864 RDMAC_MODE_LNGREAD_ENAB
);
5865 if (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
)
5866 rdmac_mode
|= RDMAC_MODE_SPLIT_ENABLE
;
5868 /* If statement applies to 5705 and 5750 PCI devices only */
5869 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
5870 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
5871 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)) {
5872 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
&&
5873 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
5874 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
5875 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
5876 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
5877 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
)) {
5878 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
5882 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)
5883 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
5885 #if TG3_TSO_SUPPORT != 0
5886 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
5887 rdmac_mode
|= (1 << 27);
5890 /* Receive/send statistics. */
5891 if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
5892 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
5893 val
= tr32(RCVLPC_STATS_ENABLE
);
5894 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
5895 tw32(RCVLPC_STATS_ENABLE
, val
);
5897 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
5899 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
5900 tw32(SNDDATAI_STATSENAB
, 0xffffff);
5901 tw32(SNDDATAI_STATSCTRL
,
5902 (SNDDATAI_SCTRL_ENABLE
|
5903 SNDDATAI_SCTRL_FASTUPD
));
5905 /* Setup host coalescing engine. */
5906 tw32(HOSTCC_MODE
, 0);
5907 for (i
= 0; i
< 2000; i
++) {
5908 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
5913 __tg3_set_coalesce(tp
, &tp
->coal
);
5915 /* set status block DMA address */
5916 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
5917 ((u64
) tp
->status_mapping
>> 32));
5918 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
5919 ((u64
) tp
->status_mapping
& 0xffffffff));
5921 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5922 /* Status/statistics block address. See tg3_timer,
5923 * the tg3_periodic_fetch_stats call there, and
5924 * tg3_get_stats to see how this works for 5705/5750 chips.
5926 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
5927 ((u64
) tp
->stats_mapping
>> 32));
5928 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
5929 ((u64
) tp
->stats_mapping
& 0xffffffff));
5930 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
5931 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
5934 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
5936 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
5937 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
5938 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
5939 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
5941 /* Clear statistics/status block in chip, and status block in ram. */
5942 for (i
= NIC_SRAM_STATS_BLK
;
5943 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
5945 tg3_write_mem(tp
, i
, 0);
5948 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
5950 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
5951 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
5952 /* reset to prevent losing 1st rx packet intermittently */
5953 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
5957 tp
->mac_mode
= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
5958 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
| MAC_MODE_FHDE_ENABLE
;
5959 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
5962 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
5963 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
5964 * register to preserve the GPIO settings for LOMs. The GPIOs,
5965 * whether used as inputs or outputs, are set by boot code after
5968 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
5971 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE2
|
5972 GRC_LCLCTRL_GPIO_OUTPUT0
| GRC_LCLCTRL_GPIO_OUTPUT2
;
5974 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
5975 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
5976 GRC_LCLCTRL_GPIO_OUTPUT3
;
5978 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
5980 /* GPIO1 must be driven high for eeprom write protect */
5981 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
5982 GRC_LCLCTRL_GPIO_OUTPUT1
);
5984 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
5987 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0);
5990 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5991 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
5995 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
5996 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
5997 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
5998 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
5999 WDMAC_MODE_LNGREAD_ENAB
);
6001 /* If statement applies to 5705 and 5750 PCI devices only */
6002 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
6003 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
6004 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) {
6005 if ((tp
->tg3_flags
& TG3_FLG2_TSO_CAPABLE
) &&
6006 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
6007 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
6009 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
6010 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
6011 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
6012 val
|= WDMAC_MODE_RX_ACCEL
;
6016 tw32_f(WDMAC_MODE
, val
);
6019 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0) {
6020 val
= tr32(TG3PCI_X_CAPS
);
6021 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
6022 val
&= ~PCIX_CAPS_BURST_MASK
;
6023 val
|= (PCIX_CAPS_MAX_BURST_CPIOB
<< PCIX_CAPS_BURST_SHIFT
);
6024 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
6025 val
&= ~(PCIX_CAPS_SPLIT_MASK
| PCIX_CAPS_BURST_MASK
);
6026 val
|= (PCIX_CAPS_MAX_BURST_CPIOB
<< PCIX_CAPS_BURST_SHIFT
);
6027 if (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
)
6028 val
|= (tp
->split_mode_max_reqs
<<
6029 PCIX_CAPS_SPLIT_SHIFT
);
6031 tw32(TG3PCI_X_CAPS
, val
);
6034 tw32_f(RDMAC_MODE
, rdmac_mode
);
6037 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
6038 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
6039 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
6040 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
6041 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
6042 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
6043 tw32(RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
);
6044 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
6045 #if TG3_TSO_SUPPORT != 0
6046 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6047 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
6049 tw32(SNDBDI_MODE
, SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
);
6050 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
6052 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
6053 err
= tg3_load_5701_a0_firmware_fix(tp
);
6058 #if TG3_TSO_SUPPORT != 0
6059 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
6060 err
= tg3_load_tso_firmware(tp
);
6066 tp
->tx_mode
= TX_MODE_ENABLE
;
6067 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
6070 tp
->rx_mode
= RX_MODE_ENABLE
;
6071 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6074 if (tp
->link_config
.phy_is_low_power
) {
6075 tp
->link_config
.phy_is_low_power
= 0;
6076 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
6077 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
6078 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
6081 tp
->mi_mode
= MAC_MI_MODE_BASE
;
6082 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
6085 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
6087 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
6088 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
6089 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
6092 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6095 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
6096 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
6097 !(tp
->tg3_flags2
& TG3_FLG2_SERDES_PREEMPHASIS
)) {
6098 /* Set drive transmission level to 1.2V */
6099 /* only if the signal pre-emphasis bit is not set */
6100 val
= tr32(MAC_SERDES_CFG
);
6103 tw32(MAC_SERDES_CFG
, val
);
6105 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
6106 tw32(MAC_SERDES_CFG
, 0x616000);
6109 /* Prevent chip from dropping frames when flow control
6112 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, 2);
6114 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
6115 (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
6116 /* Use hardware link auto-negotiation */
6117 tp
->tg3_flags2
|= TG3_FLG2_HW_AUTONEG
;
6120 if ((tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) &&
6121 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
6124 tmp
= tr32(SERDES_RX_CTRL
);
6125 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
6126 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
6127 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
6128 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
6131 err
= tg3_setup_phy(tp
, 1);
6135 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
6138 /* Clear CRC stats. */
6139 if (!tg3_readphy(tp
, 0x1e, &tmp
)) {
6140 tg3_writephy(tp
, 0x1e, tmp
| 0x8000);
6141 tg3_readphy(tp
, 0x14, &tmp
);
6145 __tg3_set_rx_mode(tp
->dev
);
6147 /* Initialize receive rules. */
6148 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
6149 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
6150 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
6151 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
6153 if ((tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
6154 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
6158 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)
6162 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
6164 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
6166 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
6168 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
6170 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
6172 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
6174 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
6176 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
6178 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
6180 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
6182 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
6184 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
6186 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6188 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6196 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
6201 /* Called at device open time to get the chip ready for
6202 * packet processing. Invoked with tp->lock held.
6204 static int tg3_init_hw(struct tg3
*tp
)
6208 /* Force the chip into D0. */
6209 err
= tg3_set_power_state(tp
, 0);
6213 tg3_switch_clocks(tp
);
6215 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
6217 err
= tg3_reset_hw(tp
);
/* Accumulate the 32-bit hardware counter at register REG into the
 * 64-bit (high/low pair) statistic PSTAT, carrying into ->high when
 * the ->low half wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6230 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
6232 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
6234 if (!netif_carrier_ok(tp
->dev
))
6237 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
6238 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
6239 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
6240 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
6241 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
6242 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
6243 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
6244 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
6245 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
6246 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
6247 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
6248 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
6249 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
6251 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
6252 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
6253 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
6254 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
6255 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
6256 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
6257 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
6258 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
6259 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
6260 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
6261 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
6262 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
6263 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
6264 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
6267 static void tg3_timer(unsigned long __opaque
)
6269 struct tg3
*tp
= (struct tg3
*) __opaque
;
6271 spin_lock(&tp
->lock
);
6273 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)) {
6274 /* All of this garbage is because when using non-tagged
6275 * IRQ status the mailbox/status_block protocol the chip
6276 * uses with the cpu is race prone.
6278 if (tp
->hw_status
->status
& SD_STATUS_UPDATED
) {
6279 tw32(GRC_LOCAL_CTRL
,
6280 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
6282 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
6283 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
6286 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
6287 tp
->tg3_flags2
|= TG3_FLG2_RESTART_TIMER
;
6288 spin_unlock(&tp
->lock
);
6289 schedule_work(&tp
->reset_task
);
6294 /* This part only runs once per second. */
6295 if (!--tp
->timer_counter
) {
6296 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
6297 tg3_periodic_fetch_stats(tp
);
6299 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
6303 mac_stat
= tr32(MAC_STATUS
);
6306 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
) {
6307 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
6309 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
6313 tg3_setup_phy(tp
, 0);
6314 } else if (tp
->tg3_flags
& TG3_FLAG_POLL_SERDES
) {
6315 u32 mac_stat
= tr32(MAC_STATUS
);
6318 if (netif_carrier_ok(tp
->dev
) &&
6319 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
6322 if (! netif_carrier_ok(tp
->dev
) &&
6323 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
6324 MAC_STATUS_SIGNAL_DET
))) {
6330 ~MAC_MODE_PORT_MODE_MASK
));
6332 tw32_f(MAC_MODE
, tp
->mac_mode
);
6334 tg3_setup_phy(tp
, 0);
6336 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
6337 tg3_serdes_parallel_detect(tp
);
6339 tp
->timer_counter
= tp
->timer_multiplier
;
6342 /* Heartbeat is only sent once every 2 seconds. */
6343 if (!--tp
->asf_counter
) {
6344 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
6347 tg3_write_mem_fast(tp
, NIC_SRAM_FW_CMD_MBOX
,
6348 FWCMD_NICDRV_ALIVE2
);
6349 tg3_write_mem_fast(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
6350 /* 5 seconds timeout */
6351 tg3_write_mem_fast(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
, 5);
6352 val
= tr32(GRC_RX_CPU_EVENT
);
6354 tw32(GRC_RX_CPU_EVENT
, val
);
6356 tp
->asf_counter
= tp
->asf_multiplier
;
6359 spin_unlock(&tp
->lock
);
6361 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
6362 add_timer(&tp
->timer
);
6365 static int tg3_test_interrupt(struct tg3
*tp
)
6367 struct net_device
*dev
= tp
->dev
;
6371 if (!netif_running(dev
))
6374 tg3_disable_ints(tp
);
6376 free_irq(tp
->pdev
->irq
, dev
);
6378 err
= request_irq(tp
->pdev
->irq
, tg3_test_isr
,
6379 SA_SHIRQ
| SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6383 tp
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
6384 tg3_enable_ints(tp
);
6386 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
6389 for (i
= 0; i
< 5; i
++) {
6390 int_mbox
= tr32_mailbox(MAILBOX_INTERRUPT_0
+
6397 tg3_disable_ints(tp
);
6399 free_irq(tp
->pdev
->irq
, dev
);
6401 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
)
6402 err
= request_irq(tp
->pdev
->irq
, tg3_msi
,
6403 SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6405 irqreturn_t (*fn
)(int, void *, struct pt_regs
*)=tg3_interrupt
;
6406 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6407 fn
= tg3_interrupt_tagged
;
6408 err
= request_irq(tp
->pdev
->irq
, fn
,
6409 SA_SHIRQ
| SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6421 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6422 * successfully restored
6424 static int tg3_test_msi(struct tg3
*tp
)
6426 struct net_device
*dev
= tp
->dev
;
6430 if (!(tp
->tg3_flags2
& TG3_FLG2_USING_MSI
))
6433 /* Turn off SERR reporting in case MSI terminates with Master
6436 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
6437 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
6438 pci_cmd
& ~PCI_COMMAND_SERR
);
6440 err
= tg3_test_interrupt(tp
);
6442 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
6447 /* other failures */
6451 /* MSI test failed, go back to INTx mode */
6452 printk(KERN_WARNING PFX
"%s: No interrupt was generated using MSI, "
6453 "switching to INTx mode. Please report this failure to "
6454 "the PCI maintainer and include system chipset information.\n",
6457 free_irq(tp
->pdev
->irq
, dev
);
6458 pci_disable_msi(tp
->pdev
);
6460 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6463 irqreturn_t (*fn
)(int, void *, struct pt_regs
*)=tg3_interrupt
;
6464 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6465 fn
= tg3_interrupt_tagged
;
6467 err
= request_irq(tp
->pdev
->irq
, fn
,
6468 SA_SHIRQ
| SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6473 /* Need to reset the chip because the MSI cycle may have terminated
6474 * with Master Abort.
6476 tg3_full_lock(tp
, 1);
6478 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6479 err
= tg3_init_hw(tp
);
6481 tg3_full_unlock(tp
);
6484 free_irq(tp
->pdev
->irq
, dev
);
6489 static int tg3_open(struct net_device
*dev
)
6491 struct tg3
*tp
= netdev_priv(dev
);
6494 tg3_full_lock(tp
, 0);
6496 tg3_disable_ints(tp
);
6497 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
6499 tg3_full_unlock(tp
);
6501 /* The placement of this call is tied
6502 * to the setup and use of Host TX descriptors.
6504 err
= tg3_alloc_consistent(tp
);
6508 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
6509 (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5750_AX
) &&
6510 (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5750_BX
) &&
6511 !((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) &&
6512 (tp
->pdev_peer
== tp
->pdev
))) {
6513 /* All MSI supporting chips should support tagged
6514 * status. Assert that this is the case.
6516 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)) {
6517 printk(KERN_WARNING PFX
"%s: MSI without TAGGED? "
6518 "Not using MSI.\n", tp
->dev
->name
);
6519 } else if (pci_enable_msi(tp
->pdev
) == 0) {
6522 msi_mode
= tr32(MSGINT_MODE
);
6523 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
6524 tp
->tg3_flags2
|= TG3_FLG2_USING_MSI
;
6527 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
)
6528 err
= request_irq(tp
->pdev
->irq
, tg3_msi
,
6529 SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6531 irqreturn_t (*fn
)(int, void *, struct pt_regs
*)=tg3_interrupt
;
6532 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6533 fn
= tg3_interrupt_tagged
;
6535 err
= request_irq(tp
->pdev
->irq
, fn
,
6536 SA_SHIRQ
| SA_SAMPLE_RANDOM
, dev
->name
, dev
);
6540 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6541 pci_disable_msi(tp
->pdev
);
6542 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6544 tg3_free_consistent(tp
);
6548 tg3_full_lock(tp
, 0);
6550 err
= tg3_init_hw(tp
);
6552 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6555 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6556 tp
->timer_offset
= HZ
;
6558 tp
->timer_offset
= HZ
/ 10;
6560 BUG_ON(tp
->timer_offset
> HZ
);
6561 tp
->timer_counter
= tp
->timer_multiplier
=
6562 (HZ
/ tp
->timer_offset
);
6563 tp
->asf_counter
= tp
->asf_multiplier
=
6564 ((HZ
/ tp
->timer_offset
) * 2);
6566 init_timer(&tp
->timer
);
6567 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
6568 tp
->timer
.data
= (unsigned long) tp
;
6569 tp
->timer
.function
= tg3_timer
;
6572 tg3_full_unlock(tp
);
6575 free_irq(tp
->pdev
->irq
, dev
);
6576 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6577 pci_disable_msi(tp
->pdev
);
6578 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6580 tg3_free_consistent(tp
);
6584 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6585 err
= tg3_test_msi(tp
);
6588 tg3_full_lock(tp
, 0);
6590 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6591 pci_disable_msi(tp
->pdev
);
6592 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6594 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6596 tg3_free_consistent(tp
);
6598 tg3_full_unlock(tp
);
6604 tg3_full_lock(tp
, 0);
6606 add_timer(&tp
->timer
);
6607 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
6608 tg3_enable_ints(tp
);
6610 tg3_full_unlock(tp
);
6612 netif_start_queue(dev
);
6618 /*static*/ void tg3_dump_state(struct tg3
*tp
)
6620 u32 val32
, val32_2
, val32_3
, val32_4
, val32_5
;
6624 pci_read_config_word(tp
->pdev
, PCI_STATUS
, &val16
);
6625 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, &val32
);
6626 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
6630 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
6631 tr32(MAC_MODE
), tr32(MAC_STATUS
));
6632 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
6633 tr32(MAC_EVENT
), tr32(MAC_LED_CTRL
));
6634 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
6635 tr32(MAC_TX_MODE
), tr32(MAC_TX_STATUS
));
6636 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
6637 tr32(MAC_RX_MODE
), tr32(MAC_RX_STATUS
));
6639 /* Send data initiator control block */
6640 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
6641 tr32(SNDDATAI_MODE
), tr32(SNDDATAI_STATUS
));
6642 printk(" SNDDATAI_STATSCTRL[%08x]\n",
6643 tr32(SNDDATAI_STATSCTRL
));
6645 /* Send data completion control block */
6646 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE
));
6648 /* Send BD ring selector block */
6649 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
6650 tr32(SNDBDS_MODE
), tr32(SNDBDS_STATUS
));
6652 /* Send BD initiator control block */
6653 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
6654 tr32(SNDBDI_MODE
), tr32(SNDBDI_STATUS
));
6656 /* Send BD completion control block */
6657 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE
));
6659 /* Receive list placement control block */
6660 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
6661 tr32(RCVLPC_MODE
), tr32(RCVLPC_STATUS
));
6662 printk(" RCVLPC_STATSCTRL[%08x]\n",
6663 tr32(RCVLPC_STATSCTRL
));
6665 /* Receive data and receive BD initiator control block */
6666 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
6667 tr32(RCVDBDI_MODE
), tr32(RCVDBDI_STATUS
));
6669 /* Receive data completion control block */
6670 printk("DEBUG: RCVDCC_MODE[%08x]\n",
6673 /* Receive BD initiator control block */
6674 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
6675 tr32(RCVBDI_MODE
), tr32(RCVBDI_STATUS
));
6677 /* Receive BD completion control block */
6678 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
6679 tr32(RCVCC_MODE
), tr32(RCVCC_STATUS
));
6681 /* Receive list selector control block */
6682 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
6683 tr32(RCVLSC_MODE
), tr32(RCVLSC_STATUS
));
6685 /* Mbuf cluster free block */
6686 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
6687 tr32(MBFREE_MODE
), tr32(MBFREE_STATUS
));
6689 /* Host coalescing control block */
6690 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
6691 tr32(HOSTCC_MODE
), tr32(HOSTCC_STATUS
));
6692 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
6693 tr32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
6694 tr32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
));
6695 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
6696 tr32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
6697 tr32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
));
6698 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
6699 tr32(HOSTCC_STATS_BLK_NIC_ADDR
));
6700 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
6701 tr32(HOSTCC_STATUS_BLK_NIC_ADDR
));
6703 /* Memory arbiter control block */
6704 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
6705 tr32(MEMARB_MODE
), tr32(MEMARB_STATUS
));
6707 /* Buffer manager control block */
6708 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
6709 tr32(BUFMGR_MODE
), tr32(BUFMGR_STATUS
));
6710 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
6711 tr32(BUFMGR_MB_POOL_ADDR
), tr32(BUFMGR_MB_POOL_SIZE
));
6712 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
6713 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
6714 tr32(BUFMGR_DMA_DESC_POOL_ADDR
),
6715 tr32(BUFMGR_DMA_DESC_POOL_SIZE
));
6717 /* Read DMA control block */
6718 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
6719 tr32(RDMAC_MODE
), tr32(RDMAC_STATUS
));
6721 /* Write DMA control block */
6722 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
6723 tr32(WDMAC_MODE
), tr32(WDMAC_STATUS
));
6725 /* DMA completion block */
6726 printk("DEBUG: DMAC_MODE[%08x]\n",
6730 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
6731 tr32(GRC_MODE
), tr32(GRC_MISC_CFG
));
6732 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
6733 tr32(GRC_LOCAL_CTRL
));
6736 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
6737 tr32(RCVDBDI_JUMBO_BD
+ 0x0),
6738 tr32(RCVDBDI_JUMBO_BD
+ 0x4),
6739 tr32(RCVDBDI_JUMBO_BD
+ 0x8),
6740 tr32(RCVDBDI_JUMBO_BD
+ 0xc));
6741 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
6742 tr32(RCVDBDI_STD_BD
+ 0x0),
6743 tr32(RCVDBDI_STD_BD
+ 0x4),
6744 tr32(RCVDBDI_STD_BD
+ 0x8),
6745 tr32(RCVDBDI_STD_BD
+ 0xc));
6746 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
6747 tr32(RCVDBDI_MINI_BD
+ 0x0),
6748 tr32(RCVDBDI_MINI_BD
+ 0x4),
6749 tr32(RCVDBDI_MINI_BD
+ 0x8),
6750 tr32(RCVDBDI_MINI_BD
+ 0xc));
6752 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x0, &val32
);
6753 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x4, &val32_2
);
6754 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x8, &val32_3
);
6755 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0xc, &val32_4
);
6756 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
6757 val32
, val32_2
, val32_3
, val32_4
);
6759 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x0, &val32
);
6760 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x4, &val32_2
);
6761 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x8, &val32_3
);
6762 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0xc, &val32_4
);
6763 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
6764 val32
, val32_2
, val32_3
, val32_4
);
6766 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x0, &val32
);
6767 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x4, &val32_2
);
6768 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x8, &val32_3
);
6769 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0xc, &val32_4
);
6770 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x10, &val32_5
);
6771 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
6772 val32
, val32_2
, val32_3
, val32_4
, val32_5
);
6774 /* SW status block */
6775 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6776 tp
->hw_status
->status
,
6777 tp
->hw_status
->status_tag
,
6778 tp
->hw_status
->rx_jumbo_consumer
,
6779 tp
->hw_status
->rx_consumer
,
6780 tp
->hw_status
->rx_mini_consumer
,
6781 tp
->hw_status
->idx
[0].rx_producer
,
6782 tp
->hw_status
->idx
[0].tx_consumer
);
6784 /* SW statistics block */
6785 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
6786 ((u32
*)tp
->hw_stats
)[0],
6787 ((u32
*)tp
->hw_stats
)[1],
6788 ((u32
*)tp
->hw_stats
)[2],
6789 ((u32
*)tp
->hw_stats
)[3]);
6792 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
6793 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ 0x0),
6794 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ 0x4),
6795 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0
+ 0x0),
6796 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0
+ 0x4));
6798 /* NIC side send descriptors. */
6799 for (i
= 0; i
< 6; i
++) {
6802 txd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_TX_BUFFER_DESC
6803 + (i
* sizeof(struct tg3_tx_buffer_desc
));
6804 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
6806 readl(txd
+ 0x0), readl(txd
+ 0x4),
6807 readl(txd
+ 0x8), readl(txd
+ 0xc));
6810 /* NIC side RX descriptors. */
6811 for (i
= 0; i
< 6; i
++) {
6814 rxd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_RX_BUFFER_DESC
6815 + (i
* sizeof(struct tg3_rx_buffer_desc
));
6816 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
6818 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
6819 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
6820 rxd
+= (4 * sizeof(u32
));
6821 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
6823 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
6824 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
6827 for (i
= 0; i
< 6; i
++) {
6830 rxd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_RX_JUMBO_BUFFER_DESC
6831 + (i
* sizeof(struct tg3_rx_buffer_desc
));
6832 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
6834 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
6835 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
6836 rxd
+= (4 * sizeof(u32
));
6837 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6839 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
6840 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
6845 static struct net_device_stats
*tg3_get_stats(struct net_device
*);
6846 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*);
6848 static int tg3_close(struct net_device
*dev
)
6850 struct tg3
*tp
= netdev_priv(dev
);
6852 /* Calling flush_scheduled_work() may deadlock because
6853 * linkwatch_event() may be on the workqueue and it will try to get
6854 * the rtnl_lock which we are holding.
6856 while (tp
->tg3_flags
& TG3_FLAG_IN_RESET_TASK
)
6859 netif_stop_queue(dev
);
6861 del_timer_sync(&tp
->timer
);
6863 tg3_full_lock(tp
, 1);
6868 tg3_disable_ints(tp
);
6870 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6873 ~(TG3_FLAG_INIT_COMPLETE
|
6874 TG3_FLAG_GOT_SERDES_FLOWCTL
);
6875 netif_carrier_off(tp
->dev
);
6877 tg3_full_unlock(tp
);
6879 free_irq(tp
->pdev
->irq
, dev
);
6880 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6881 pci_disable_msi(tp
->pdev
);
6882 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6885 memcpy(&tp
->net_stats_prev
, tg3_get_stats(tp
->dev
),
6886 sizeof(tp
->net_stats_prev
));
6887 memcpy(&tp
->estats_prev
, tg3_get_estats(tp
),
6888 sizeof(tp
->estats_prev
));
6890 tg3_free_consistent(tp
);
6895 static inline unsigned long get_stat64(tg3_stat64_t
*val
)
6899 #if (BITS_PER_LONG == 32)
6902 ret
= ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
6907 static unsigned long calc_crc_errors(struct tg3
*tp
)
6909 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
6911 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) &&
6912 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
6913 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
6916 spin_lock_bh(&tp
->lock
);
6917 if (!tg3_readphy(tp
, 0x1e, &val
)) {
6918 tg3_writephy(tp
, 0x1e, val
| 0x8000);
6919 tg3_readphy(tp
, 0x14, &val
);
6922 spin_unlock_bh(&tp
->lock
);
6924 tp
->phy_crc_errors
+= val
;
6926 return tp
->phy_crc_errors
;
6929 return get_stat64(&hw_stats
->rx_fcs_errors
);
6932 #define ESTAT_ADD(member) \
6933 estats->member = old_estats->member + \
6934 get_stat64(&hw_stats->member)
6936 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*tp
)
6938 struct tg3_ethtool_stats
*estats
= &tp
->estats
;
6939 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
6940 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
6945 ESTAT_ADD(rx_octets
);
6946 ESTAT_ADD(rx_fragments
);
6947 ESTAT_ADD(rx_ucast_packets
);
6948 ESTAT_ADD(rx_mcast_packets
);
6949 ESTAT_ADD(rx_bcast_packets
);
6950 ESTAT_ADD(rx_fcs_errors
);
6951 ESTAT_ADD(rx_align_errors
);
6952 ESTAT_ADD(rx_xon_pause_rcvd
);
6953 ESTAT_ADD(rx_xoff_pause_rcvd
);
6954 ESTAT_ADD(rx_mac_ctrl_rcvd
);
6955 ESTAT_ADD(rx_xoff_entered
);
6956 ESTAT_ADD(rx_frame_too_long_errors
);
6957 ESTAT_ADD(rx_jabbers
);
6958 ESTAT_ADD(rx_undersize_packets
);
6959 ESTAT_ADD(rx_in_length_errors
);
6960 ESTAT_ADD(rx_out_length_errors
);
6961 ESTAT_ADD(rx_64_or_less_octet_packets
);
6962 ESTAT_ADD(rx_65_to_127_octet_packets
);
6963 ESTAT_ADD(rx_128_to_255_octet_packets
);
6964 ESTAT_ADD(rx_256_to_511_octet_packets
);
6965 ESTAT_ADD(rx_512_to_1023_octet_packets
);
6966 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
6967 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
6968 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
6969 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
6970 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
6972 ESTAT_ADD(tx_octets
);
6973 ESTAT_ADD(tx_collisions
);
6974 ESTAT_ADD(tx_xon_sent
);
6975 ESTAT_ADD(tx_xoff_sent
);
6976 ESTAT_ADD(tx_flow_control
);
6977 ESTAT_ADD(tx_mac_errors
);
6978 ESTAT_ADD(tx_single_collisions
);
6979 ESTAT_ADD(tx_mult_collisions
);
6980 ESTAT_ADD(tx_deferred
);
6981 ESTAT_ADD(tx_excessive_collisions
);
6982 ESTAT_ADD(tx_late_collisions
);
6983 ESTAT_ADD(tx_collide_2times
);
6984 ESTAT_ADD(tx_collide_3times
);
6985 ESTAT_ADD(tx_collide_4times
);
6986 ESTAT_ADD(tx_collide_5times
);
6987 ESTAT_ADD(tx_collide_6times
);
6988 ESTAT_ADD(tx_collide_7times
);
6989 ESTAT_ADD(tx_collide_8times
);
6990 ESTAT_ADD(tx_collide_9times
);
6991 ESTAT_ADD(tx_collide_10times
);
6992 ESTAT_ADD(tx_collide_11times
);
6993 ESTAT_ADD(tx_collide_12times
);
6994 ESTAT_ADD(tx_collide_13times
);
6995 ESTAT_ADD(tx_collide_14times
);
6996 ESTAT_ADD(tx_collide_15times
);
6997 ESTAT_ADD(tx_ucast_packets
);
6998 ESTAT_ADD(tx_mcast_packets
);
6999 ESTAT_ADD(tx_bcast_packets
);
7000 ESTAT_ADD(tx_carrier_sense_errors
);
7001 ESTAT_ADD(tx_discards
);
7002 ESTAT_ADD(tx_errors
);
7004 ESTAT_ADD(dma_writeq_full
);
7005 ESTAT_ADD(dma_write_prioq_full
);
7006 ESTAT_ADD(rxbds_empty
);
7007 ESTAT_ADD(rx_discards
);
7008 ESTAT_ADD(rx_errors
);
7009 ESTAT_ADD(rx_threshold_hit
);
7011 ESTAT_ADD(dma_readq_full
);
7012 ESTAT_ADD(dma_read_prioq_full
);
7013 ESTAT_ADD(tx_comp_queue_full
);
7015 ESTAT_ADD(ring_set_send_prod_index
);
7016 ESTAT_ADD(ring_status_update
);
7017 ESTAT_ADD(nic_irqs
);
7018 ESTAT_ADD(nic_avoided_irqs
);
7019 ESTAT_ADD(nic_tx_threshold_hit
);
7024 static struct net_device_stats
*tg3_get_stats(struct net_device
*dev
)
7026 struct tg3
*tp
= netdev_priv(dev
);
7027 struct net_device_stats
*stats
= &tp
->net_stats
;
7028 struct net_device_stats
*old_stats
= &tp
->net_stats_prev
;
7029 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
7034 stats
->rx_packets
= old_stats
->rx_packets
+
7035 get_stat64(&hw_stats
->rx_ucast_packets
) +
7036 get_stat64(&hw_stats
->rx_mcast_packets
) +
7037 get_stat64(&hw_stats
->rx_bcast_packets
);
7039 stats
->tx_packets
= old_stats
->tx_packets
+
7040 get_stat64(&hw_stats
->tx_ucast_packets
) +
7041 get_stat64(&hw_stats
->tx_mcast_packets
) +
7042 get_stat64(&hw_stats
->tx_bcast_packets
);
7044 stats
->rx_bytes
= old_stats
->rx_bytes
+
7045 get_stat64(&hw_stats
->rx_octets
);
7046 stats
->tx_bytes
= old_stats
->tx_bytes
+
7047 get_stat64(&hw_stats
->tx_octets
);
7049 stats
->rx_errors
= old_stats
->rx_errors
+
7050 get_stat64(&hw_stats
->rx_errors
);
7051 stats
->tx_errors
= old_stats
->tx_errors
+
7052 get_stat64(&hw_stats
->tx_errors
) +
7053 get_stat64(&hw_stats
->tx_mac_errors
) +
7054 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
7055 get_stat64(&hw_stats
->tx_discards
);
7057 stats
->multicast
= old_stats
->multicast
+
7058 get_stat64(&hw_stats
->rx_mcast_packets
);
7059 stats
->collisions
= old_stats
->collisions
+
7060 get_stat64(&hw_stats
->tx_collisions
);
7062 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
7063 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
7064 get_stat64(&hw_stats
->rx_undersize_packets
);
7066 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
7067 get_stat64(&hw_stats
->rxbds_empty
);
7068 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
7069 get_stat64(&hw_stats
->rx_align_errors
);
7070 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
7071 get_stat64(&hw_stats
->tx_discards
);
7072 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
7073 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
7075 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
7076 calc_crc_errors(tp
);
7078 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
7079 get_stat64(&hw_stats
->rx_discards
);
7084 static inline u32
calc_crc(unsigned char *buf
, int len
)
7092 for (j
= 0; j
< len
; j
++) {
7095 for (k
= 0; k
< 8; k
++) {
7109 static void tg3_set_multi(struct tg3
*tp
, unsigned int accept_all
)
7111 /* accept or reject all multicast frames */
7112 tw32(MAC_HASH_REG_0
, accept_all
? 0xffffffff : 0);
7113 tw32(MAC_HASH_REG_1
, accept_all
? 0xffffffff : 0);
7114 tw32(MAC_HASH_REG_2
, accept_all
? 0xffffffff : 0);
7115 tw32(MAC_HASH_REG_3
, accept_all
? 0xffffffff : 0);
7118 static void __tg3_set_rx_mode(struct net_device
*dev
)
7120 struct tg3
*tp
= netdev_priv(dev
);
7123 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
7124 RX_MODE_KEEP_VLAN_TAG
);
7126 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7129 #if TG3_VLAN_TAG_USED
7131 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
7132 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
7134 /* By definition, VLAN is disabled always in this
7137 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
7138 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
7141 if (dev
->flags
& IFF_PROMISC
) {
7142 /* Promiscuous mode. */
7143 rx_mode
|= RX_MODE_PROMISC
;
7144 } else if (dev
->flags
& IFF_ALLMULTI
) {
7145 /* Accept all multicast. */
7146 tg3_set_multi (tp
, 1);
7147 } else if (dev
->mc_count
< 1) {
7148 /* Reject all multicast. */
7149 tg3_set_multi (tp
, 0);
7151 /* Accept one or more multicast(s). */
7152 struct dev_mc_list
*mclist
;
7154 u32 mc_filter
[4] = { 0, };
7159 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
7160 i
++, mclist
= mclist
->next
) {
7162 crc
= calc_crc (mclist
->dmi_addr
, ETH_ALEN
);
7164 regidx
= (bit
& 0x60) >> 5;
7166 mc_filter
[regidx
] |= (1 << bit
);
7169 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
7170 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
7171 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
7172 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
7175 if (rx_mode
!= tp
->rx_mode
) {
7176 tp
->rx_mode
= rx_mode
;
7177 tw32_f(MAC_RX_MODE
, rx_mode
);
/* Locked wrapper around __tg3_set_rx_mode() for the netdev callback. */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
7191 #define TG3_REGDUMP_LEN (32 * 1024)
7193 static int tg3_get_regs_len(struct net_device
*dev
)
7195 return TG3_REGDUMP_LEN
;
7198 static void tg3_get_regs(struct net_device
*dev
,
7199 struct ethtool_regs
*regs
, void *_p
)
7202 struct tg3
*tp
= netdev_priv(dev
);
7208 memset(p
, 0, TG3_REGDUMP_LEN
);
7210 tg3_full_lock(tp
, 0);
7212 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
7213 #define GET_REG32_LOOP(base,len) \
7214 do { p = (u32 *)(orig_p + (base)); \
7215 for (i = 0; i < len; i += 4) \
7216 __GET_REG32((base) + i); \
7218 #define GET_REG32_1(reg) \
7219 do { p = (u32 *)(orig_p + (reg)); \
7220 __GET_REG32((reg)); \
7223 GET_REG32_LOOP(TG3PCI_VENDOR
, 0xb0);
7224 GET_REG32_LOOP(MAILBOX_INTERRUPT_0
, 0x200);
7225 GET_REG32_LOOP(MAC_MODE
, 0x4f0);
7226 GET_REG32_LOOP(SNDDATAI_MODE
, 0xe0);
7227 GET_REG32_1(SNDDATAC_MODE
);
7228 GET_REG32_LOOP(SNDBDS_MODE
, 0x80);
7229 GET_REG32_LOOP(SNDBDI_MODE
, 0x48);
7230 GET_REG32_1(SNDBDC_MODE
);
7231 GET_REG32_LOOP(RCVLPC_MODE
, 0x20);
7232 GET_REG32_LOOP(RCVLPC_SELLST_BASE
, 0x15c);
7233 GET_REG32_LOOP(RCVDBDI_MODE
, 0x0c);
7234 GET_REG32_LOOP(RCVDBDI_JUMBO_BD
, 0x3c);
7235 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0
, 0x44);
7236 GET_REG32_1(RCVDCC_MODE
);
7237 GET_REG32_LOOP(RCVBDI_MODE
, 0x20);
7238 GET_REG32_LOOP(RCVCC_MODE
, 0x14);
7239 GET_REG32_LOOP(RCVLSC_MODE
, 0x08);
7240 GET_REG32_1(MBFREE_MODE
);
7241 GET_REG32_LOOP(HOSTCC_MODE
, 0x100);
7242 GET_REG32_LOOP(MEMARB_MODE
, 0x10);
7243 GET_REG32_LOOP(BUFMGR_MODE
, 0x58);
7244 GET_REG32_LOOP(RDMAC_MODE
, 0x08);
7245 GET_REG32_LOOP(WDMAC_MODE
, 0x08);
7246 GET_REG32_1(RX_CPU_MODE
);
7247 GET_REG32_1(RX_CPU_STATE
);
7248 GET_REG32_1(RX_CPU_PGMCTR
);
7249 GET_REG32_1(RX_CPU_HWBKPT
);
7250 GET_REG32_1(TX_CPU_MODE
);
7251 GET_REG32_1(TX_CPU_STATE
);
7252 GET_REG32_1(TX_CPU_PGMCTR
);
7253 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0
, 0x110);
7254 GET_REG32_LOOP(FTQ_RESET
, 0x120);
7255 GET_REG32_LOOP(MSGINT_MODE
, 0x0c);
7256 GET_REG32_1(DMAC_MODE
);
7257 GET_REG32_LOOP(GRC_MODE
, 0x4c);
7258 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
7259 GET_REG32_LOOP(NVRAM_CMD
, 0x24);
7262 #undef GET_REG32_LOOP
7265 tg3_full_unlock(tp
);
7268 static int tg3_get_eeprom_len(struct net_device
*dev
)
7270 struct tg3
*tp
= netdev_priv(dev
);
7272 return tp
->nvram_size
;
7275 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
);
7277 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
7279 struct tg3
*tp
= netdev_priv(dev
);
7282 u32 i
, offset
, len
, val
, b_offset
, b_count
;
7284 offset
= eeprom
->offset
;
7288 eeprom
->magic
= TG3_EEPROM_MAGIC
;
7291 /* adjustments to start on required 4 byte boundary */
7292 b_offset
= offset
& 3;
7293 b_count
= 4 - b_offset
;
7294 if (b_count
> len
) {
7295 /* i.e. offset=1 len=2 */
7298 ret
= tg3_nvram_read(tp
, offset
-b_offset
, &val
);
7301 val
= cpu_to_le32(val
);
7302 memcpy(data
, ((char*)&val
) + b_offset
, b_count
);
7305 eeprom
->len
+= b_count
;
7308 /* read bytes upto the last 4 byte boundary */
7309 pd
= &data
[eeprom
->len
];
7310 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
7311 ret
= tg3_nvram_read(tp
, offset
+ i
, &val
);
7316 val
= cpu_to_le32(val
);
7317 memcpy(pd
+ i
, &val
, 4);
7322 /* read last bytes not ending on 4 byte boundary */
7323 pd
= &data
[eeprom
->len
];
7325 b_offset
= offset
+ len
- b_count
;
7326 ret
= tg3_nvram_read(tp
, b_offset
, &val
);
7329 val
= cpu_to_le32(val
);
7330 memcpy(pd
, ((char*)&val
), b_count
);
7331 eeprom
->len
+= b_count
;
7336 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
);
7338 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
7340 struct tg3
*tp
= netdev_priv(dev
);
7342 u32 offset
, len
, b_offset
, odd_len
, start
, end
;
7345 if (eeprom
->magic
!= TG3_EEPROM_MAGIC
)
7348 offset
= eeprom
->offset
;
7351 if ((b_offset
= (offset
& 3))) {
7352 /* adjustments to start on required 4 byte boundary */
7353 ret
= tg3_nvram_read(tp
, offset
-b_offset
, &start
);
7356 start
= cpu_to_le32(start
);
7365 /* adjustments to end on required 4 byte boundary */
7367 len
= (len
+ 3) & ~3;
7368 ret
= tg3_nvram_read(tp
, offset
+len
-4, &end
);
7371 end
= cpu_to_le32(end
);
7375 if (b_offset
|| odd_len
) {
7376 buf
= kmalloc(len
, GFP_KERNEL
);
7380 memcpy(buf
, &start
, 4);
7382 memcpy(buf
+len
-4, &end
, 4);
7383 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
7386 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
7394 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7396 struct tg3
*tp
= netdev_priv(dev
);
7398 cmd
->supported
= (SUPPORTED_Autoneg
);
7400 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
7401 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
7402 SUPPORTED_1000baseT_Full
);
7404 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
))
7405 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
7406 SUPPORTED_100baseT_Full
|
7407 SUPPORTED_10baseT_Half
|
7408 SUPPORTED_10baseT_Full
|
7411 cmd
->supported
|= SUPPORTED_FIBRE
;
7413 cmd
->advertising
= tp
->link_config
.advertising
;
7414 if (netif_running(dev
)) {
7415 cmd
->speed
= tp
->link_config
.active_speed
;
7416 cmd
->duplex
= tp
->link_config
.active_duplex
;
7419 cmd
->phy_address
= PHY_ADDR
;
7420 cmd
->transceiver
= 0;
7421 cmd
->autoneg
= tp
->link_config
.autoneg
;
7427 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7429 struct tg3
*tp
= netdev_priv(dev
);
7431 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) {
7432 /* These are the only valid advertisement bits allowed. */
7433 if (cmd
->autoneg
== AUTONEG_ENABLE
&&
7434 (cmd
->advertising
& ~(ADVERTISED_1000baseT_Half
|
7435 ADVERTISED_1000baseT_Full
|
7436 ADVERTISED_Autoneg
|
7439 /* Fiber can only do SPEED_1000. */
7440 else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
7441 (cmd
->speed
!= SPEED_1000
))
7443 /* Copper cannot force SPEED_1000. */
7444 } else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
7445 (cmd
->speed
== SPEED_1000
))
7447 else if ((cmd
->speed
== SPEED_1000
) &&
7448 (tp
->tg3_flags2
& TG3_FLAG_10_100_ONLY
))
7451 tg3_full_lock(tp
, 0);
7453 tp
->link_config
.autoneg
= cmd
->autoneg
;
7454 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
7455 tp
->link_config
.advertising
= cmd
->advertising
;
7456 tp
->link_config
.speed
= SPEED_INVALID
;
7457 tp
->link_config
.duplex
= DUPLEX_INVALID
;
7459 tp
->link_config
.advertising
= 0;
7460 tp
->link_config
.speed
= cmd
->speed
;
7461 tp
->link_config
.duplex
= cmd
->duplex
;
7464 if (netif_running(dev
))
7465 tg3_setup_phy(tp
, 1);
7467 tg3_full_unlock(tp
);
7472 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
7474 struct tg3
*tp
= netdev_priv(dev
);
7476 strcpy(info
->driver
, DRV_MODULE_NAME
);
7477 strcpy(info
->version
, DRV_MODULE_VERSION
);
7478 strcpy(info
->bus_info
, pci_name(tp
->pdev
));
7481 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7483 struct tg3
*tp
= netdev_priv(dev
);
7485 wol
->supported
= WAKE_MAGIC
;
7487 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)
7488 wol
->wolopts
= WAKE_MAGIC
;
7489 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
7492 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7494 struct tg3
*tp
= netdev_priv(dev
);
7496 if (wol
->wolopts
& ~WAKE_MAGIC
)
7498 if ((wol
->wolopts
& WAKE_MAGIC
) &&
7499 tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
&&
7500 !(tp
->tg3_flags
& TG3_FLAG_SERDES_WOL_CAP
))
7503 spin_lock_bh(&tp
->lock
);
7504 if (wol
->wolopts
& WAKE_MAGIC
)
7505 tp
->tg3_flags
|= TG3_FLAG_WOL_ENABLE
;
7507 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
7508 spin_unlock_bh(&tp
->lock
);
7513 static u32
tg3_get_msglevel(struct net_device
*dev
)
7515 struct tg3
*tp
= netdev_priv(dev
);
7516 return tp
->msg_enable
;
7519 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
7521 struct tg3
*tp
= netdev_priv(dev
);
7522 tp
->msg_enable
= value
;
7525 #if TG3_TSO_SUPPORT != 0
7526 static int tg3_set_tso(struct net_device
*dev
, u32 value
)
7528 struct tg3
*tp
= netdev_priv(dev
);
7530 if (!(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
7535 return ethtool_op_set_tso(dev
, value
);
7539 static int tg3_nway_reset(struct net_device
*dev
)
7541 struct tg3
*tp
= netdev_priv(dev
);
7545 if (!netif_running(dev
))
7548 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
7551 spin_lock_bh(&tp
->lock
);
7553 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
7554 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
7555 ((bmcr
& BMCR_ANENABLE
) ||
7556 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
))) {
7557 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
7561 spin_unlock_bh(&tp
->lock
);
7566 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
7568 struct tg3
*tp
= netdev_priv(dev
);
7570 ering
->rx_max_pending
= TG3_RX_RING_SIZE
- 1;
7571 ering
->rx_mini_max_pending
= 0;
7572 ering
->rx_jumbo_max_pending
= TG3_RX_JUMBO_RING_SIZE
- 1;
7574 ering
->rx_pending
= tp
->rx_pending
;
7575 ering
->rx_mini_pending
= 0;
7576 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
7577 ering
->tx_pending
= tp
->tx_pending
;
7580 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
7582 struct tg3
*tp
= netdev_priv(dev
);
7585 if ((ering
->rx_pending
> TG3_RX_RING_SIZE
- 1) ||
7586 (ering
->rx_jumbo_pending
> TG3_RX_JUMBO_RING_SIZE
- 1) ||
7587 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1))
7590 if (netif_running(dev
)) {
7595 tg3_full_lock(tp
, irq_sync
);
7597 tp
->rx_pending
= ering
->rx_pending
;
7599 if ((tp
->tg3_flags2
& TG3_FLG2_MAX_RXPEND_64
) &&
7600 tp
->rx_pending
> 63)
7601 tp
->rx_pending
= 63;
7602 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
7603 tp
->tx_pending
= ering
->tx_pending
;
7605 if (netif_running(dev
)) {
7606 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7608 tg3_netif_start(tp
);
7611 tg3_full_unlock(tp
);
7616 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
7618 struct tg3
*tp
= netdev_priv(dev
);
7620 epause
->autoneg
= (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) != 0;
7621 epause
->rx_pause
= (tp
->tg3_flags
& TG3_FLAG_RX_PAUSE
) != 0;
7622 epause
->tx_pause
= (tp
->tg3_flags
& TG3_FLAG_TX_PAUSE
) != 0;
7625 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
7627 struct tg3
*tp
= netdev_priv(dev
);
7630 if (netif_running(dev
)) {
7635 tg3_full_lock(tp
, irq_sync
);
7637 if (epause
->autoneg
)
7638 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
7640 tp
->tg3_flags
&= ~TG3_FLAG_PAUSE_AUTONEG
;
7641 if (epause
->rx_pause
)
7642 tp
->tg3_flags
|= TG3_FLAG_RX_PAUSE
;
7644 tp
->tg3_flags
&= ~TG3_FLAG_RX_PAUSE
;
7645 if (epause
->tx_pause
)
7646 tp
->tg3_flags
|= TG3_FLAG_TX_PAUSE
;
7648 tp
->tg3_flags
&= ~TG3_FLAG_TX_PAUSE
;
7650 if (netif_running(dev
)) {
7651 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7653 tg3_netif_start(tp
);
7656 tg3_full_unlock(tp
);
7661 static u32
tg3_get_rx_csum(struct net_device
*dev
)
7663 struct tg3
*tp
= netdev_priv(dev
);
7664 return (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0;
7667 static int tg3_set_rx_csum(struct net_device
*dev
, u32 data
)
7669 struct tg3
*tp
= netdev_priv(dev
);
7671 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
7677 spin_lock_bh(&tp
->lock
);
7679 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
7681 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
7682 spin_unlock_bh(&tp
->lock
);
7687 static int tg3_set_tx_csum(struct net_device
*dev
, u32 data
)
7689 struct tg3
*tp
= netdev_priv(dev
);
7691 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
7698 dev
->features
|= NETIF_F_IP_CSUM
;
7700 dev
->features
&= ~NETIF_F_IP_CSUM
;
7705 static int tg3_get_stats_count (struct net_device
*dev
)
7707 return TG3_NUM_STATS
;
7710 static int tg3_get_test_count (struct net_device
*dev
)
7712 return TG3_NUM_TEST
;
7715 static void tg3_get_strings (struct net_device
*dev
, u32 stringset
, u8
*buf
)
7717 switch (stringset
) {
7719 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
7722 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
7725 WARN_ON(1); /* we need a WARN() */
7730 static int tg3_phys_id(struct net_device
*dev
, u32 data
)
7732 struct tg3
*tp
= netdev_priv(dev
);
7735 if (!netif_running(tp
->dev
))
7741 for (i
= 0; i
< (data
* 2); i
++) {
7743 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
7744 LED_CTRL_1000MBPS_ON
|
7745 LED_CTRL_100MBPS_ON
|
7746 LED_CTRL_10MBPS_ON
|
7747 LED_CTRL_TRAFFIC_OVERRIDE
|
7748 LED_CTRL_TRAFFIC_BLINK
|
7749 LED_CTRL_TRAFFIC_LED
);
7752 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
7753 LED_CTRL_TRAFFIC_OVERRIDE
);
7755 if (msleep_interruptible(500))
7758 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
7762 static void tg3_get_ethtool_stats (struct net_device
*dev
,
7763 struct ethtool_stats
*estats
, u64
*tmp_stats
)
7765 struct tg3
*tp
= netdev_priv(dev
);
7766 memcpy(tmp_stats
, tg3_get_estats(tp
), sizeof(tp
->estats
));
7769 #define NVRAM_TEST_SIZE 0x100
7771 static int tg3_test_nvram(struct tg3
*tp
)
7776 buf
= kmalloc(NVRAM_TEST_SIZE
, GFP_KERNEL
);
7780 for (i
= 0, j
= 0; i
< NVRAM_TEST_SIZE
; i
+= 4, j
++) {
7783 if ((err
= tg3_nvram_read(tp
, i
, &val
)) != 0)
7785 buf
[j
] = cpu_to_le32(val
);
7787 if (i
< NVRAM_TEST_SIZE
)
7791 if (cpu_to_be32(buf
[0]) != TG3_EEPROM_MAGIC
)
7794 /* Bootstrap checksum at offset 0x10 */
7795 csum
= calc_crc((unsigned char *) buf
, 0x10);
7796 if(csum
!= cpu_to_le32(buf
[0x10/4]))
7799 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
7800 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
7801 if (csum
!= cpu_to_le32(buf
[0xfc/4]))
7811 #define TG3_SERDES_TIMEOUT_SEC 2
7812 #define TG3_COPPER_TIMEOUT_SEC 6
7814 static int tg3_test_link(struct tg3
*tp
)
7818 if (!netif_running(tp
->dev
))
7821 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
7822 max
= TG3_SERDES_TIMEOUT_SEC
;
7824 max
= TG3_COPPER_TIMEOUT_SEC
;
7826 for (i
= 0; i
< max
; i
++) {
7827 if (netif_carrier_ok(tp
->dev
))
7830 if (msleep_interruptible(1000))
7837 /* Only test the commonly used registers */
7838 static const int tg3_test_registers(struct tg3
*tp
)
7841 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
7845 #define TG3_FL_5705 0x1
7846 #define TG3_FL_NOT_5705 0x2
7847 #define TG3_FL_NOT_5788 0x4
7851 /* MAC Control Registers */
7852 { MAC_MODE
, TG3_FL_NOT_5705
,
7853 0x00000000, 0x00ef6f8c },
7854 { MAC_MODE
, TG3_FL_5705
,
7855 0x00000000, 0x01ef6b8c },
7856 { MAC_STATUS
, TG3_FL_NOT_5705
,
7857 0x03800107, 0x00000000 },
7858 { MAC_STATUS
, TG3_FL_5705
,
7859 0x03800100, 0x00000000 },
7860 { MAC_ADDR_0_HIGH
, 0x0000,
7861 0x00000000, 0x0000ffff },
7862 { MAC_ADDR_0_LOW
, 0x0000,
7863 0x00000000, 0xffffffff },
7864 { MAC_RX_MTU_SIZE
, 0x0000,
7865 0x00000000, 0x0000ffff },
7866 { MAC_TX_MODE
, 0x0000,
7867 0x00000000, 0x00000070 },
7868 { MAC_TX_LENGTHS
, 0x0000,
7869 0x00000000, 0x00003fff },
7870 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
7871 0x00000000, 0x000007fc },
7872 { MAC_RX_MODE
, TG3_FL_5705
,
7873 0x00000000, 0x000007dc },
7874 { MAC_HASH_REG_0
, 0x0000,
7875 0x00000000, 0xffffffff },
7876 { MAC_HASH_REG_1
, 0x0000,
7877 0x00000000, 0xffffffff },
7878 { MAC_HASH_REG_2
, 0x0000,
7879 0x00000000, 0xffffffff },
7880 { MAC_HASH_REG_3
, 0x0000,
7881 0x00000000, 0xffffffff },
7883 /* Receive Data and Receive BD Initiator Control Registers. */
7884 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
7885 0x00000000, 0xffffffff },
7886 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
7887 0x00000000, 0xffffffff },
7888 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
7889 0x00000000, 0x00000003 },
7890 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
7891 0x00000000, 0xffffffff },
7892 { RCVDBDI_STD_BD
+0, 0x0000,
7893 0x00000000, 0xffffffff },
7894 { RCVDBDI_STD_BD
+4, 0x0000,
7895 0x00000000, 0xffffffff },
7896 { RCVDBDI_STD_BD
+8, 0x0000,
7897 0x00000000, 0xffff0002 },
7898 { RCVDBDI_STD_BD
+0xc, 0x0000,
7899 0x00000000, 0xffffffff },
7901 /* Receive BD Initiator Control Registers. */
7902 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
7903 0x00000000, 0xffffffff },
7904 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
7905 0x00000000, 0x000003ff },
7906 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
7907 0x00000000, 0xffffffff },
7909 /* Host Coalescing Control Registers. */
7910 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
7911 0x00000000, 0x00000004 },
7912 { HOSTCC_MODE
, TG3_FL_5705
,
7913 0x00000000, 0x000000f6 },
7914 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
7915 0x00000000, 0xffffffff },
7916 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
7917 0x00000000, 0x000003ff },
7918 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
7919 0x00000000, 0xffffffff },
7920 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
7921 0x00000000, 0x000003ff },
7922 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
7923 0x00000000, 0xffffffff },
7924 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
7925 0x00000000, 0x000000ff },
7926 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
7927 0x00000000, 0xffffffff },
7928 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
7929 0x00000000, 0x000000ff },
7930 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
7931 0x00000000, 0xffffffff },
7932 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
7933 0x00000000, 0xffffffff },
7934 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
7935 0x00000000, 0xffffffff },
7936 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
7937 0x00000000, 0x000000ff },
7938 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
7939 0x00000000, 0xffffffff },
7940 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
7941 0x00000000, 0x000000ff },
7942 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
7943 0x00000000, 0xffffffff },
7944 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
7945 0x00000000, 0xffffffff },
7946 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
7947 0x00000000, 0xffffffff },
7948 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
7949 0x00000000, 0xffffffff },
7950 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
7951 0x00000000, 0xffffffff },
7952 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
7953 0xffffffff, 0x00000000 },
7954 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
7955 0xffffffff, 0x00000000 },
7957 /* Buffer Manager Control Registers. */
7958 { BUFMGR_MB_POOL_ADDR
, 0x0000,
7959 0x00000000, 0x007fff80 },
7960 { BUFMGR_MB_POOL_SIZE
, 0x0000,
7961 0x00000000, 0x007fffff },
7962 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
7963 0x00000000, 0x0000003f },
7964 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
7965 0x00000000, 0x000001ff },
7966 { BUFMGR_MB_HIGH_WATER
, 0x0000,
7967 0x00000000, 0x000001ff },
7968 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
7969 0xffffffff, 0x00000000 },
7970 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
7971 0xffffffff, 0x00000000 },
7973 /* Mailbox Registers */
7974 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
7975 0x00000000, 0x000001ff },
7976 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
7977 0x00000000, 0x000001ff },
7978 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
7979 0x00000000, 0x000007ff },
7980 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
7981 0x00000000, 0x000001ff },
7983 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
7986 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
7991 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
7992 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
7995 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
7998 if ((tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
7999 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
8002 offset
= (u32
) reg_tbl
[i
].offset
;
8003 read_mask
= reg_tbl
[i
].read_mask
;
8004 write_mask
= reg_tbl
[i
].write_mask
;
8006 /* Save the original register content */
8007 save_val
= tr32(offset
);
8009 /* Determine the read-only value. */
8010 read_val
= save_val
& read_mask
;
8012 /* Write zero to the register, then make sure the read-only bits
8013 * are not changed and the read/write bits are all zeros.
8019 /* Test the read-only and read/write bits. */
8020 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
8023 /* Write ones to all the bits defined by RdMask and WrMask, then
8024 * make sure the read-only bits are not changed and the
8025 * read/write bits are all ones.
8027 tw32(offset
, read_mask
| write_mask
);
8031 /* Test the read-only bits. */
8032 if ((val
& read_mask
) != read_val
)
8035 /* Test the read/write bits. */
8036 if ((val
& write_mask
) != write_mask
)
8039 tw32(offset
, save_val
);
8045 printk(KERN_ERR PFX
"Register test failed at offset %x\n", offset
);
8046 tw32(offset
, save_val
);
8050 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
8052 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8056 for (i
= 0; i
< sizeof(test_pattern
)/sizeof(u32
); i
++) {
8057 for (j
= 0; j
< len
; j
+= 4) {
8060 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
8061 tg3_read_mem(tp
, offset
+ j
, &val
);
8062 if (val
!= test_pattern
[i
])
8069 static int tg3_test_memory(struct tg3
*tp
)
8071 static struct mem_entry
{
8074 } mem_tbl_570x
[] = {
8075 { 0x00000000, 0x00b50},
8076 { 0x00002000, 0x1c000},
8077 { 0xffffffff, 0x00000}
8078 }, mem_tbl_5705
[] = {
8079 { 0x00000100, 0x0000c},
8080 { 0x00000200, 0x00008},
8081 { 0x00004000, 0x00800},
8082 { 0x00006000, 0x01000},
8083 { 0x00008000, 0x02000},
8084 { 0x00010000, 0x0e000},
8085 { 0xffffffff, 0x00000}
8087 struct mem_entry
*mem_tbl
;
8091 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
8092 mem_tbl
= mem_tbl_5705
;
8094 mem_tbl
= mem_tbl_570x
;
8096 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
8097 if ((err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
,
8098 mem_tbl
[i
].len
)) != 0)
8105 #define TG3_MAC_LOOPBACK 0
8106 #define TG3_PHY_LOOPBACK 1
8108 static int tg3_run_loopback(struct tg3
*tp
, int loopback_mode
)
8110 u32 mac_mode
, rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
8112 struct sk_buff
*skb
, *rx_skb
;
8115 int num_pkts
, tx_len
, rx_len
, i
, err
;
8116 struct tg3_rx_buffer_desc
*desc
;
8118 if (loopback_mode
== TG3_MAC_LOOPBACK
) {
8119 /* HW errata - mac loopback fails in some cases on 5780.
8120 * Normal traffic and PHY loopback are not affected by
8123 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
)
8126 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
8127 MAC_MODE_PORT_INT_LPBACK
| MAC_MODE_LINK_POLARITY
|
8128 MAC_MODE_PORT_MODE_GMII
;
8129 tw32(MAC_MODE
, mac_mode
);
8130 } else if (loopback_mode
== TG3_PHY_LOOPBACK
) {
8131 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
| BMCR_FULLDPLX
|
8134 /* reset to prevent losing 1st rx packet intermittently */
8135 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
8136 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8138 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8140 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
8141 MAC_MODE_LINK_POLARITY
| MAC_MODE_PORT_MODE_GMII
;
8142 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
)
8143 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
8144 tw32(MAC_MODE
, mac_mode
);
8152 skb
= dev_alloc_skb(tx_len
);
8153 tx_data
= skb_put(skb
, tx_len
);
8154 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
8155 memset(tx_data
+ 6, 0x0, 8);
8157 tw32(MAC_RX_MTU_SIZE
, tx_len
+ 4);
8159 for (i
= 14; i
< tx_len
; i
++)
8160 tx_data
[i
] = (u8
) (i
& 0xff);
8162 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
8164 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
8169 rx_start_idx
= tp
->hw_status
->idx
[0].rx_producer
;
8173 tg3_set_txd(tp
, tp
->tx_prod
, map
, tx_len
, 0, 1);
8178 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
,
8180 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
);
8184 for (i
= 0; i
< 10; i
++) {
8185 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
8190 tx_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
8191 rx_idx
= tp
->hw_status
->idx
[0].rx_producer
;
8192 if ((tx_idx
== tp
->tx_prod
) &&
8193 (rx_idx
== (rx_start_idx
+ num_pkts
)))
8197 pci_unmap_single(tp
->pdev
, map
, tx_len
, PCI_DMA_TODEVICE
);
8200 if (tx_idx
!= tp
->tx_prod
)
8203 if (rx_idx
!= rx_start_idx
+ num_pkts
)
8206 desc
= &tp
->rx_rcb
[rx_start_idx
];
8207 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
8208 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
8209 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
8212 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
8213 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
8216 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4;
8217 if (rx_len
!= tx_len
)
8220 rx_skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
8222 map
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
], mapping
);
8223 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
, PCI_DMA_FROMDEVICE
);
8225 for (i
= 14; i
< tx_len
; i
++) {
8226 if (*(rx_skb
->data
+ i
) != (u8
) (i
& 0xff))
8231 /* tg3_free_rings will unmap and free the rx_skb */
8236 #define TG3_MAC_LOOPBACK_FAILED 1
8237 #define TG3_PHY_LOOPBACK_FAILED 2
8238 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8239 TG3_PHY_LOOPBACK_FAILED)
8241 static int tg3_test_loopback(struct tg3
*tp
)
8245 if (!netif_running(tp
->dev
))
8246 return TG3_LOOPBACK_FAILED
;
8250 if (tg3_run_loopback(tp
, TG3_MAC_LOOPBACK
))
8251 err
|= TG3_MAC_LOOPBACK_FAILED
;
8252 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
8253 if (tg3_run_loopback(tp
, TG3_PHY_LOOPBACK
))
8254 err
|= TG3_PHY_LOOPBACK_FAILED
;
8260 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
8263 struct tg3
*tp
= netdev_priv(dev
);
8265 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
8267 if (tg3_test_nvram(tp
) != 0) {
8268 etest
->flags
|= ETH_TEST_FL_FAILED
;
8271 if (tg3_test_link(tp
) != 0) {
8272 etest
->flags
|= ETH_TEST_FL_FAILED
;
8275 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
8276 int err
, irq_sync
= 0;
8278 if (netif_running(dev
)) {
8283 tg3_full_lock(tp
, irq_sync
);
8285 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
8286 err
= tg3_nvram_lock(tp
);
8287 tg3_halt_cpu(tp
, RX_CPU_BASE
);
8288 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
8289 tg3_halt_cpu(tp
, TX_CPU_BASE
);
8291 tg3_nvram_unlock(tp
);
8293 if (tg3_test_registers(tp
) != 0) {
8294 etest
->flags
|= ETH_TEST_FL_FAILED
;
8297 if (tg3_test_memory(tp
) != 0) {
8298 etest
->flags
|= ETH_TEST_FL_FAILED
;
8301 if ((data
[4] = tg3_test_loopback(tp
)) != 0)
8302 etest
->flags
|= ETH_TEST_FL_FAILED
;
8304 tg3_full_unlock(tp
);
8306 if (tg3_test_interrupt(tp
) != 0) {
8307 etest
->flags
|= ETH_TEST_FL_FAILED
;
8311 tg3_full_lock(tp
, 0);
8313 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8314 if (netif_running(dev
)) {
8315 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
8317 tg3_netif_start(tp
);
8320 tg3_full_unlock(tp
);
8324 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
8326 struct mii_ioctl_data
*data
= if_mii(ifr
);
8327 struct tg3
*tp
= netdev_priv(dev
);
8332 data
->phy_id
= PHY_ADDR
;
8338 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8339 break; /* We have no PHY */
8341 spin_lock_bh(&tp
->lock
);
8342 err
= tg3_readphy(tp
, data
->reg_num
& 0x1f, &mii_regval
);
8343 spin_unlock_bh(&tp
->lock
);
8345 data
->val_out
= mii_regval
;
8351 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8352 break; /* We have no PHY */
8354 if (!capable(CAP_NET_ADMIN
))
8357 spin_lock_bh(&tp
->lock
);
8358 err
= tg3_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
8359 spin_unlock_bh(&tp
->lock
);
8370 #if TG3_VLAN_TAG_USED
8371 static void tg3_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*grp
)
8373 struct tg3
*tp
= netdev_priv(dev
);
8375 tg3_full_lock(tp
, 0);
8379 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8380 __tg3_set_rx_mode(dev
);
8382 tg3_full_unlock(tp
);
8385 static void tg3_vlan_rx_kill_vid(struct net_device
*dev
, unsigned short vid
)
8387 struct tg3
*tp
= netdev_priv(dev
);
8389 tg3_full_lock(tp
, 0);
8391 tp
->vlgrp
->vlan_devices
[vid
] = NULL
;
8392 tg3_full_unlock(tp
);
8396 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
8398 struct tg3
*tp
= netdev_priv(dev
);
8400 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
8404 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
8406 struct tg3
*tp
= netdev_priv(dev
);
8407 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
8408 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
8410 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
8411 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
8412 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
8413 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
8414 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
8417 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
8418 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
8419 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
8420 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
8421 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
8422 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
8423 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
8424 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
8425 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
8426 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
8429 /* No rx interrupts will be generated if both are zero */
8430 if ((ec
->rx_coalesce_usecs
== 0) &&
8431 (ec
->rx_max_coalesced_frames
== 0))
8434 /* No tx interrupts will be generated if both are zero */
8435 if ((ec
->tx_coalesce_usecs
== 0) &&
8436 (ec
->tx_max_coalesced_frames
== 0))
8439 /* Only copy relevant parameters, ignore all others. */
8440 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
8441 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
8442 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
8443 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
8444 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
8445 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
8446 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
8447 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
8448 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
8450 if (netif_running(dev
)) {
8451 tg3_full_lock(tp
, 0);
8452 __tg3_set_coalesce(tp
, &tp
->coal
);
8453 tg3_full_unlock(tp
);
8458 static struct ethtool_ops tg3_ethtool_ops
= {
8459 .get_settings
= tg3_get_settings
,
8460 .set_settings
= tg3_set_settings
,
8461 .get_drvinfo
= tg3_get_drvinfo
,
8462 .get_regs_len
= tg3_get_regs_len
,
8463 .get_regs
= tg3_get_regs
,
8464 .get_wol
= tg3_get_wol
,
8465 .set_wol
= tg3_set_wol
,
8466 .get_msglevel
= tg3_get_msglevel
,
8467 .set_msglevel
= tg3_set_msglevel
,
8468 .nway_reset
= tg3_nway_reset
,
8469 .get_link
= ethtool_op_get_link
,
8470 .get_eeprom_len
= tg3_get_eeprom_len
,
8471 .get_eeprom
= tg3_get_eeprom
,
8472 .set_eeprom
= tg3_set_eeprom
,
8473 .get_ringparam
= tg3_get_ringparam
,
8474 .set_ringparam
= tg3_set_ringparam
,
8475 .get_pauseparam
= tg3_get_pauseparam
,
8476 .set_pauseparam
= tg3_set_pauseparam
,
8477 .get_rx_csum
= tg3_get_rx_csum
,
8478 .set_rx_csum
= tg3_set_rx_csum
,
8479 .get_tx_csum
= ethtool_op_get_tx_csum
,
8480 .set_tx_csum
= tg3_set_tx_csum
,
8481 .get_sg
= ethtool_op_get_sg
,
8482 .set_sg
= ethtool_op_set_sg
,
8483 #if TG3_TSO_SUPPORT != 0
8484 .get_tso
= ethtool_op_get_tso
,
8485 .set_tso
= tg3_set_tso
,
8487 .self_test_count
= tg3_get_test_count
,
8488 .self_test
= tg3_self_test
,
8489 .get_strings
= tg3_get_strings
,
8490 .phys_id
= tg3_phys_id
,
8491 .get_stats_count
= tg3_get_stats_count
,
8492 .get_ethtool_stats
= tg3_get_ethtool_stats
,
8493 .get_coalesce
= tg3_get_coalesce
,
8494 .set_coalesce
= tg3_set_coalesce
,
8495 .get_perm_addr
= ethtool_op_get_perm_addr
,
8498 static void __devinit
tg3_get_eeprom_size(struct tg3
*tp
)
8502 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
8504 if (tg3_nvram_read(tp
, 0, &val
) != 0)
8507 if (swab32(val
) != TG3_EEPROM_MAGIC
)
8511 * Size the chip by reading offsets at increasing powers of two.
8512 * When we encounter our validation signature, we know the addressing
8513 * has wrapped around, and thus have our chip size.
8517 while (cursize
< tp
->nvram_size
) {
8518 if (tg3_nvram_read(tp
, cursize
, &val
) != 0)
8521 if (swab32(val
) == TG3_EEPROM_MAGIC
)
8527 tp
->nvram_size
= cursize
;
8530 static void __devinit
tg3_get_nvram_size(struct tg3
*tp
)
8534 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
8536 tp
->nvram_size
= (val
>> 16) * 1024;
8540 tp
->nvram_size
= 0x20000;
8543 static void __devinit
tg3_get_nvram_info(struct tg3
*tp
)
8547 nvcfg1
= tr32(NVRAM_CFG1
);
8548 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
8549 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
8552 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
8553 tw32(NVRAM_CFG1
, nvcfg1
);
8556 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) ||
8557 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
8558 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
8559 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
8560 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8561 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
8562 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8564 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
8565 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8566 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
8568 case FLASH_VENDOR_ATMEL_EEPROM
:
8569 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8570 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
8571 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8573 case FLASH_VENDOR_ST
:
8574 tp
->nvram_jedecnum
= JEDEC_ST
;
8575 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
8576 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8578 case FLASH_VENDOR_SAIFUN
:
8579 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
8580 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
8582 case FLASH_VENDOR_SST_SMALL
:
8583 case FLASH_VENDOR_SST_LARGE
:
8584 tp
->nvram_jedecnum
= JEDEC_SST
;
8585 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
8590 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8591 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
8592 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8596 static void __devinit
tg3_get_5752_nvram_info(struct tg3
*tp
)
8600 nvcfg1
= tr32(NVRAM_CFG1
);
8602 /* NVRAM protection for TPM */
8603 if (nvcfg1
& (1 << 27))
8604 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
8606 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
8607 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
8608 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
8609 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8610 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8612 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
8613 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
8614 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8615 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
8617 case FLASH_5752VENDOR_ST_M45PE10
:
8618 case FLASH_5752VENDOR_ST_M45PE20
:
8619 case FLASH_5752VENDOR_ST_M45PE40
:
8620 tp
->nvram_jedecnum
= JEDEC_ST
;
8621 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
8622 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
8626 if (tp
->tg3_flags2
& TG3_FLG2_FLASH
) {
8627 switch (nvcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
8628 case FLASH_5752PAGE_SIZE_256
:
8629 tp
->nvram_pagesize
= 256;
8631 case FLASH_5752PAGE_SIZE_512
:
8632 tp
->nvram_pagesize
= 512;
8634 case FLASH_5752PAGE_SIZE_1K
:
8635 tp
->nvram_pagesize
= 1024;
8637 case FLASH_5752PAGE_SIZE_2K
:
8638 tp
->nvram_pagesize
= 2048;
8640 case FLASH_5752PAGE_SIZE_4K
:
8641 tp
->nvram_pagesize
= 4096;
8643 case FLASH_5752PAGE_SIZE_264
:
8644 tp
->nvram_pagesize
= 264;
8649 /* For eeprom, set pagesize to maximum eeprom size */
8650 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
8652 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
8653 tw32(NVRAM_CFG1
, nvcfg1
);
8657 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
8658 static void __devinit
tg3_nvram_init(struct tg3
*tp
)
8662 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
)
8665 tw32_f(GRC_EEPROM_ADDR
,
8666 (EEPROM_ADDR_FSM_RESET
|
8667 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
8668 EEPROM_ADDR_CLKPERD_SHIFT
)));
8670 /* XXX schedule_timeout() ... */
8671 for (j
= 0; j
< 100; j
++)
8674 /* Enable seeprom accesses. */
8675 tw32_f(GRC_LOCAL_CTRL
,
8676 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
8679 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
8680 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
8681 tp
->tg3_flags
|= TG3_FLAG_NVRAM
;
8683 if (tg3_nvram_lock(tp
)) {
8684 printk(KERN_WARNING PFX
"%s: Cannot get nvarm lock, "
8685 "tg3_nvram_init failed.\n", tp
->dev
->name
);
8688 tg3_enable_nvram_access(tp
);
8690 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
8691 tg3_get_5752_nvram_info(tp
);
8693 tg3_get_nvram_info(tp
);
8695 tg3_get_nvram_size(tp
);
8697 tg3_disable_nvram_access(tp
);
8698 tg3_nvram_unlock(tp
);
8701 tp
->tg3_flags
&= ~(TG3_FLAG_NVRAM
| TG3_FLAG_NVRAM_BUFFERED
);
8703 tg3_get_eeprom_size(tp
);
8707 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
8708 u32 offset
, u32
*val
)
8713 if (offset
> EEPROM_ADDR_ADDR_MASK
||
8717 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
8718 EEPROM_ADDR_DEVID_MASK
|
8720 tw32(GRC_EEPROM_ADDR
,
8722 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
8723 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
8724 EEPROM_ADDR_ADDR_MASK
) |
8725 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
8727 for (i
= 0; i
< 10000; i
++) {
8728 tmp
= tr32(GRC_EEPROM_ADDR
);
8730 if (tmp
& EEPROM_ADDR_COMPLETE
)
8734 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
8737 *val
= tr32(GRC_EEPROM_DATA
);
8741 #define NVRAM_CMD_TIMEOUT 10000
8743 static int tg3_nvram_exec_cmd(struct tg3
*tp
, u32 nvram_cmd
)
8747 tw32(NVRAM_CMD
, nvram_cmd
);
8748 for (i
= 0; i
< NVRAM_CMD_TIMEOUT
; i
++) {
8750 if (tr32(NVRAM_CMD
) & NVRAM_CMD_DONE
) {
8755 if (i
== NVRAM_CMD_TIMEOUT
) {
8761 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
)
8765 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
) {
8766 printk(KERN_ERR PFX
"Attempt to do nvram_read on Sun 570X\n");
8770 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
))
8771 return tg3_nvram_read_using_eeprom(tp
, offset
, val
);
8773 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) &&
8774 (tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
8775 (tp
->nvram_jedecnum
== JEDEC_ATMEL
)) {
8777 offset
= ((offset
/ tp
->nvram_pagesize
) <<
8778 ATMEL_AT45DB0X1B_PAGE_POS
) +
8779 (offset
% tp
->nvram_pagesize
);
8782 if (offset
> NVRAM_ADDR_MSK
)
8785 ret
= tg3_nvram_lock(tp
);
8789 tg3_enable_nvram_access(tp
);
8791 tw32(NVRAM_ADDR
, offset
);
8792 ret
= tg3_nvram_exec_cmd(tp
, NVRAM_CMD_RD
| NVRAM_CMD_GO
|
8793 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_DONE
);
8796 *val
= swab32(tr32(NVRAM_RDDATA
));
8798 tg3_disable_nvram_access(tp
);
8800 tg3_nvram_unlock(tp
);
8805 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
8806 u32 offset
, u32 len
, u8
*buf
)
8811 for (i
= 0; i
< len
; i
+= 4) {
8816 memcpy(&data
, buf
+ i
, 4);
8818 tw32(GRC_EEPROM_DATA
, cpu_to_le32(data
));
8820 val
= tr32(GRC_EEPROM_ADDR
);
8821 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
8823 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
8825 tw32(GRC_EEPROM_ADDR
, val
|
8826 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
8827 (addr
& EEPROM_ADDR_ADDR_MASK
) |
8831 for (j
= 0; j
< 10000; j
++) {
8832 val
= tr32(GRC_EEPROM_ADDR
);
8834 if (val
& EEPROM_ADDR_COMPLETE
)
8838 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
8847 /* offset and length are dword aligned */
8848 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
8852 u32 pagesize
= tp
->nvram_pagesize
;
8853 u32 pagemask
= pagesize
- 1;
8857 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
8863 u32 phy_addr
, page_off
, size
;
8865 phy_addr
= offset
& ~pagemask
;
8867 for (j
= 0; j
< pagesize
; j
+= 4) {
8868 if ((ret
= tg3_nvram_read(tp
, phy_addr
+ j
,
8869 (u32
*) (tmp
+ j
))))
8875 page_off
= offset
& pagemask
;
8882 memcpy(tmp
+ page_off
, buf
, size
);
8884 offset
= offset
+ (pagesize
- page_off
);
8886 tg3_enable_nvram_access(tp
);
8889 * Before we can erase the flash page, we need
8890 * to issue a special "write enable" command.
8892 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
8894 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
8897 /* Erase the target page */
8898 tw32(NVRAM_ADDR
, phy_addr
);
8900 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
8901 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
8903 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
8906 /* Issue another write enable to start the write. */
8907 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
8909 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
8912 for (j
= 0; j
< pagesize
; j
+= 4) {
8915 data
= *((u32
*) (tmp
+ j
));
8916 tw32(NVRAM_WRDATA
, cpu_to_be32(data
));
8918 tw32(NVRAM_ADDR
, phy_addr
+ j
);
8920 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
8924 nvram_cmd
|= NVRAM_CMD_FIRST
;
8925 else if (j
== (pagesize
- 4))
8926 nvram_cmd
|= NVRAM_CMD_LAST
;
8928 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
8935 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
8936 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
8943 /* offset and length are dword aligned */
8944 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
8949 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
8950 u32 data
, page_off
, phy_addr
, nvram_cmd
;
8952 memcpy(&data
, buf
+ i
, 4);
8953 tw32(NVRAM_WRDATA
, cpu_to_be32(data
));
8955 page_off
= offset
% tp
->nvram_pagesize
;
8957 if ((tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
8958 (tp
->nvram_jedecnum
== JEDEC_ATMEL
)) {
8960 phy_addr
= ((offset
/ tp
->nvram_pagesize
) <<
8961 ATMEL_AT45DB0X1B_PAGE_POS
) + page_off
;
8967 tw32(NVRAM_ADDR
, phy_addr
);
8969 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
8971 if ((page_off
== 0) || (i
== 0))
8972 nvram_cmd
|= NVRAM_CMD_FIRST
;
8973 else if (page_off
== (tp
->nvram_pagesize
- 4))
8974 nvram_cmd
|= NVRAM_CMD_LAST
;
8977 nvram_cmd
|= NVRAM_CMD_LAST
;
8979 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
) &&
8980 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
8981 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
8983 if ((ret
= tg3_nvram_exec_cmd(tp
,
8984 NVRAM_CMD_WREN
| NVRAM_CMD_GO
|
8989 if (!(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
8990 /* We always do complete word writes to eeprom. */
8991 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
8994 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
9000 /* offset and length are dword aligned */
9001 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
9005 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
) {
9006 printk(KERN_ERR PFX
"Attempt to do nvram_write on Sun 570X\n");
9010 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
9011 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
9012 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
9016 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
)) {
9017 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
9022 ret
= tg3_nvram_lock(tp
);
9026 tg3_enable_nvram_access(tp
);
9027 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
9028 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
))
9029 tw32(NVRAM_WRITE1
, 0x406);
9031 grc_mode
= tr32(GRC_MODE
);
9032 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
9034 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) ||
9035 !(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
9037 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
9041 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
9045 grc_mode
= tr32(GRC_MODE
);
9046 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
9048 tg3_disable_nvram_access(tp
);
9049 tg3_nvram_unlock(tp
);
9052 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
9053 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9060 struct subsys_tbl_ent
{
9061 u16 subsys_vendor
, subsys_devid
;
9065 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
9066 /* Broadcom boards. */
9067 { PCI_VENDOR_ID_BROADCOM
, 0x1644, PHY_ID_BCM5401
}, /* BCM95700A6 */
9068 { PCI_VENDOR_ID_BROADCOM
, 0x0001, PHY_ID_BCM5701
}, /* BCM95701A5 */
9069 { PCI_VENDOR_ID_BROADCOM
, 0x0002, PHY_ID_BCM8002
}, /* BCM95700T6 */
9070 { PCI_VENDOR_ID_BROADCOM
, 0x0003, 0 }, /* BCM95700A9 */
9071 { PCI_VENDOR_ID_BROADCOM
, 0x0005, PHY_ID_BCM5701
}, /* BCM95701T1 */
9072 { PCI_VENDOR_ID_BROADCOM
, 0x0006, PHY_ID_BCM5701
}, /* BCM95701T8 */
9073 { PCI_VENDOR_ID_BROADCOM
, 0x0007, 0 }, /* BCM95701A7 */
9074 { PCI_VENDOR_ID_BROADCOM
, 0x0008, PHY_ID_BCM5701
}, /* BCM95701A10 */
9075 { PCI_VENDOR_ID_BROADCOM
, 0x8008, PHY_ID_BCM5701
}, /* BCM95701A12 */
9076 { PCI_VENDOR_ID_BROADCOM
, 0x0009, PHY_ID_BCM5703
}, /* BCM95703Ax1 */
9077 { PCI_VENDOR_ID_BROADCOM
, 0x8009, PHY_ID_BCM5703
}, /* BCM95703Ax2 */
9080 { PCI_VENDOR_ID_3COM
, 0x1000, PHY_ID_BCM5401
}, /* 3C996T */
9081 { PCI_VENDOR_ID_3COM
, 0x1006, PHY_ID_BCM5701
}, /* 3C996BT */
9082 { PCI_VENDOR_ID_3COM
, 0x1004, 0 }, /* 3C996SX */
9083 { PCI_VENDOR_ID_3COM
, 0x1007, PHY_ID_BCM5701
}, /* 3C1000T */
9084 { PCI_VENDOR_ID_3COM
, 0x1008, PHY_ID_BCM5701
}, /* 3C940BR01 */
9087 { PCI_VENDOR_ID_DELL
, 0x00d1, PHY_ID_BCM5401
}, /* VIPER */
9088 { PCI_VENDOR_ID_DELL
, 0x0106, PHY_ID_BCM5401
}, /* JAGUAR */
9089 { PCI_VENDOR_ID_DELL
, 0x0109, PHY_ID_BCM5411
}, /* MERLOT */
9090 { PCI_VENDOR_ID_DELL
, 0x010a, PHY_ID_BCM5411
}, /* SLIM_MERLOT */
9092 /* Compaq boards. */
9093 { PCI_VENDOR_ID_COMPAQ
, 0x007c, PHY_ID_BCM5701
}, /* BANSHEE */
9094 { PCI_VENDOR_ID_COMPAQ
, 0x009a, PHY_ID_BCM5701
}, /* BANSHEE_2 */
9095 { PCI_VENDOR_ID_COMPAQ
, 0x007d, 0 }, /* CHANGELING */
9096 { PCI_VENDOR_ID_COMPAQ
, 0x0085, PHY_ID_BCM5701
}, /* NC7780 */
9097 { PCI_VENDOR_ID_COMPAQ
, 0x0099, PHY_ID_BCM5701
}, /* NC7780_2 */
9100 { PCI_VENDOR_ID_IBM
, 0x0281, 0 } /* IBM??? */
9103 static inline struct subsys_tbl_ent
*lookup_by_subsys(struct tg3
*tp
)
9107 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
9108 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
9109 tp
->pdev
->subsystem_vendor
) &&
9110 (subsys_id_to_phy_id
[i
].subsys_devid
==
9111 tp
->pdev
->subsystem_device
))
9112 return &subsys_id_to_phy_id
[i
];
9117 /* Since this function may be called in D3-hot power state during
9118 * tg3_init_one(), only config cycles are allowed.
9120 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
9124 /* Make sure register accesses (indirect or otherwise)
9125 * will function correctly.
9127 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9128 tp
->misc_host_ctrl
);
9130 tp
->phy_id
= PHY_ID_INVALID
;
9131 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9133 /* Do not even try poking around in here on Sun parts. */
9134 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
)
9137 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
9138 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
9139 u32 nic_cfg
, led_cfg
;
9140 u32 nic_phy_id
, ver
, cfg2
= 0, eeprom_phy_id
;
9141 int eeprom_phy_serdes
= 0;
9143 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
9144 tp
->nic_sram_data_cfg
= nic_cfg
;
9146 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
9147 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
9148 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
) &&
9149 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) &&
9150 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
) &&
9151 (ver
> 0) && (ver
< 0x100))
9152 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
9154 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
9155 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
9156 eeprom_phy_serdes
= 1;
9158 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
9159 if (nic_phy_id
!= 0) {
9160 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
9161 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
9163 eeprom_phy_id
= (id1
>> 16) << 10;
9164 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
9165 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
9169 tp
->phy_id
= eeprom_phy_id
;
9170 if (eeprom_phy_serdes
) {
9171 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
9172 tp
->tg3_flags2
|= TG3_FLG2_MII_SERDES
;
9174 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9177 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9178 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
9179 SHASTA_EXT_LED_MODE_MASK
);
9181 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
9185 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
9186 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9189 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
9190 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
9193 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
9194 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
9196 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9197 * read on some older 5700/5701 bootcode.
9199 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
9201 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
9203 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9207 case SHASTA_EXT_LED_SHARED
:
9208 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
9209 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
9210 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
9211 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
9212 LED_CTRL_MODE_PHY_2
);
9215 case SHASTA_EXT_LED_MAC
:
9216 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
9219 case SHASTA_EXT_LED_COMBO
:
9220 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
9221 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
9222 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
9223 LED_CTRL_MODE_PHY_2
);
9228 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
9229 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
9230 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
9231 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
9233 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
) &&
9234 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) &&
9235 (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
))
9236 tp
->tg3_flags
|= TG3_FLAG_EEPROM_WRITE_PROT
;
9238 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
9239 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
9240 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9241 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
9243 if (nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
)
9244 tp
->tg3_flags
|= TG3_FLAG_SERDES_WOL_CAP
;
9246 if (cfg2
& (1 << 17))
9247 tp
->tg3_flags2
|= TG3_FLG2_CAPACITIVE_COUPLING
;
9249 /* serdes signal pre-emphasis in register 0x590 set by */
9250 /* bootcode if bit 18 is set */
9251 if (cfg2
& (1 << 18))
9252 tp
->tg3_flags2
|= TG3_FLG2_SERDES_PREEMPHASIS
;
9256 static int __devinit
tg3_phy_probe(struct tg3
*tp
)
9258 u32 hw_phy_id_1
, hw_phy_id_2
;
9259 u32 hw_phy_id
, hw_phy_id_masked
;
9262 /* Reading the PHY ID register can conflict with ASF
9263 * firwmare access to the PHY hardware.
9266 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
9267 hw_phy_id
= hw_phy_id_masked
= PHY_ID_INVALID
;
9269 /* Now read the physical PHY_ID from the chip and verify
9270 * that it is sane. If it doesn't look good, we fall back
9271 * to either the hard-coded table based PHY_ID and failing
9272 * that the value found in the eeprom area.
9274 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
9275 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
9277 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
9278 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
9279 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
9281 hw_phy_id_masked
= hw_phy_id
& PHY_ID_MASK
;
9284 if (!err
&& KNOWN_PHY_ID(hw_phy_id_masked
)) {
9285 tp
->phy_id
= hw_phy_id
;
9286 if (hw_phy_id_masked
== PHY_ID_BCM8002
)
9287 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9289 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_SERDES
;
9291 if (tp
->phy_id
!= PHY_ID_INVALID
) {
9292 /* Do nothing, phy ID already set up in
9293 * tg3_get_eeprom_hw_cfg().
9296 struct subsys_tbl_ent
*p
;
9298 /* No eeprom signature? Try the hardcoded
9299 * subsys device table.
9301 p
= lookup_by_subsys(tp
);
9305 tp
->phy_id
= p
->phy_id
;
9307 tp
->phy_id
== PHY_ID_BCM8002
)
9308 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9312 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) &&
9313 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
9314 u32 bmsr
, adv_reg
, tg3_ctrl
;
9316 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
9317 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
9318 (bmsr
& BMSR_LSTATUS
))
9319 goto skip_phy_reset
;
9321 err
= tg3_phy_reset(tp
);
9325 adv_reg
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
9326 ADVERTISE_100HALF
| ADVERTISE_100FULL
|
9327 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
9329 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
9330 tg3_ctrl
= (MII_TG3_CTRL_ADV_1000_HALF
|
9331 MII_TG3_CTRL_ADV_1000_FULL
);
9332 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
9333 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
9334 tg3_ctrl
|= (MII_TG3_CTRL_AS_MASTER
|
9335 MII_TG3_CTRL_ENABLE_AS_MASTER
);
9338 if (!tg3_copper_is_advertising_all(tp
)) {
9339 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
9341 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
9342 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
9344 tg3_writephy(tp
, MII_BMCR
,
9345 BMCR_ANENABLE
| BMCR_ANRESTART
);
9347 tg3_phy_set_wirespeed(tp
);
9349 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
9350 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
9351 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
9355 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
9356 err
= tg3_init_5401phy_dsp(tp
);
9361 if (!err
&& ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
)) {
9362 err
= tg3_init_5401phy_dsp(tp
);
9365 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
9366 tp
->link_config
.advertising
=
9367 (ADVERTISED_1000baseT_Half
|
9368 ADVERTISED_1000baseT_Full
|
9369 ADVERTISED_Autoneg
|
9371 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
9372 tp
->link_config
.advertising
&=
9373 ~(ADVERTISED_1000baseT_Half
|
9374 ADVERTISED_1000baseT_Full
);
9379 static void __devinit
tg3_read_partno(struct tg3
*tp
)
9381 unsigned char vpd_data
[256];
9384 if (tp
->tg3_flags2
& TG3_FLG2_SUN_570X
) {
9385 /* Sun decided not to put the necessary bits in the
9386 * NVRAM of their onboard tg3 parts :(
9388 strcpy(tp
->board_part_number
, "Sun 570X");
9392 for (i
= 0; i
< 256; i
+= 4) {
9395 if (tg3_nvram_read(tp
, 0x100 + i
, &tmp
))
9398 vpd_data
[i
+ 0] = ((tmp
>> 0) & 0xff);
9399 vpd_data
[i
+ 1] = ((tmp
>> 8) & 0xff);
9400 vpd_data
[i
+ 2] = ((tmp
>> 16) & 0xff);
9401 vpd_data
[i
+ 3] = ((tmp
>> 24) & 0xff);
9404 /* Now parse and find the part number. */
9405 for (i
= 0; i
< 256; ) {
9406 unsigned char val
= vpd_data
[i
];
9409 if (val
== 0x82 || val
== 0x91) {
9412 (vpd_data
[i
+ 2] << 8)));
9419 block_end
= (i
+ 3 +
9421 (vpd_data
[i
+ 2] << 8)));
9423 while (i
< block_end
) {
9424 if (vpd_data
[i
+ 0] == 'P' &&
9425 vpd_data
[i
+ 1] == 'N') {
9426 int partno_len
= vpd_data
[i
+ 2];
9428 if (partno_len
> 24)
9431 memcpy(tp
->board_part_number
,
9440 /* Part number not found. */
9445 strcpy(tp
->board_part_number
, "none");
9448 #ifdef CONFIG_SPARC64
9449 static int __devinit
tg3_is_sun_570X(struct tg3
*tp
)
9451 struct pci_dev
*pdev
= tp
->pdev
;
9452 struct pcidev_cookie
*pcp
= pdev
->sysdata
;
9455 int node
= pcp
->prom_node
;
9459 err
= prom_getproperty(node
, "subsystem-vendor-id",
9460 (char *) &venid
, sizeof(venid
));
9461 if (err
== 0 || err
== -1)
9463 if (venid
== PCI_VENDOR_ID_SUN
)
9466 /* TG3 chips onboard the SunBlade-2500 don't have the
9467 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
9468 * are distinguishable from non-Sun variants by being
9469 * named "network" by the firmware. Non-Sun cards will
9470 * show up as being named "ethernet".
9472 if (!strcmp(pcp
->prom_name
, "network"))
9479 static int __devinit
tg3_get_invariants(struct tg3
*tp
)
9481 static struct pci_device_id write_reorder_chipsets
[] = {
9482 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
9483 PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
9484 { PCI_DEVICE(PCI_VENDOR_ID_VIA
,
9485 PCI_DEVICE_ID_VIA_8385_0
) },
9489 u32 cacheline_sz_reg
;
9490 u32 pci_state_reg
, grc_misc_cfg
;
9495 #ifdef CONFIG_SPARC64
9496 if (tg3_is_sun_570X(tp
))
9497 tp
->tg3_flags2
|= TG3_FLG2_SUN_570X
;
9500 /* Force memory write invalidate off. If we leave it on,
9501 * then on 5700_BX chips we have to enable a workaround.
9502 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
9503 * to match the cacheline size. The Broadcom driver have this
9504 * workaround but turns MWI off all the times so never uses
9505 * it. This seems to suggest that the workaround is insufficient.
9507 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
9508 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
9509 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
9511 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
9512 * has the register indirect write enable bit set before
9513 * we try to access any of the MMIO registers. It is also
9514 * critical that the PCI-X hw workaround situation is decided
9515 * before that as well.
9517 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9520 tp
->pci_chip_rev_id
= (misc_ctrl_reg
>>
9521 MISC_HOST_CTRL_CHIPREV_SHIFT
);
9523 /* Wrong chip ID in 5752 A0. This code can be removed later
9524 * as A0 is not in production.
9526 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5752_A0_HW
)
9527 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
9529 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
9530 * we need to disable memory and use config. cycles
9531 * only to access all registers. The 5702/03 chips
9532 * can mistakenly decode the special cycles from the
9533 * ICH chipsets as memory write cycles, causing corruption
9534 * of register and memory space. Only certain ICH bridges
9535 * will drive special cycles with non-zero data during the
9536 * address phase which can fall within the 5703's address
9537 * range. This is not an ICH bug as the PCI spec allows
9538 * non-zero address during special cycles. However, only
9539 * these ICH bridges are known to drive non-zero addresses
9540 * during special cycles.
9542 * Since special cycles do not cross PCI bridges, we only
9543 * enable this workaround if the 5703 is on the secondary
9544 * bus of these ICH bridges.
9546 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
) ||
9547 (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A2
)) {
9548 static struct tg3_dev_id
{
9552 } ich_chipsets
[] = {
9553 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
9555 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
9557 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
9559 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
9563 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
9564 struct pci_dev
*bridge
= NULL
;
9566 while (pci_id
->vendor
!= 0) {
9567 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
9573 if (pci_id
->rev
!= PCI_ANY_ID
) {
9576 pci_read_config_byte(bridge
, PCI_REVISION_ID
,
9578 if (rev
> pci_id
->rev
)
9581 if (bridge
->subordinate
&&
9582 (bridge
->subordinate
->number
==
9583 tp
->pdev
->bus
->number
)) {
9585 tp
->tg3_flags2
|= TG3_FLG2_ICH_WORKAROUND
;
9586 pci_dev_put(bridge
);
9592 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
9593 * DMA addresses > 40-bit. This bridge may have other additional
9594 * 57xx devices behind it in some 4-port NIC designs for example.
9595 * Any tg3 device found behind the bridge will also need the 40-bit
9598 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
||
9599 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
9600 tp
->tg3_flags2
|= TG3_FLG2_5780_CLASS
;
9601 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
9602 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
9605 struct pci_dev
*bridge
= NULL
;
9608 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
9609 PCI_DEVICE_ID_SERVERWORKS_EPB
,
9611 if (bridge
&& bridge
->subordinate
&&
9612 (bridge
->subordinate
->number
<=
9613 tp
->pdev
->bus
->number
) &&
9614 (bridge
->subordinate
->subordinate
>=
9615 tp
->pdev
->bus
->number
)) {
9616 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
9617 pci_dev_put(bridge
);
9623 /* Initialize misc host control in PCI block. */
9624 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
9625 MISC_HOST_CTRL_CHIPREV
);
9626 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9627 tp
->misc_host_ctrl
);
9629 pci_read_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
9632 tp
->pci_cacheline_sz
= (cacheline_sz_reg
>> 0) & 0xff;
9633 tp
->pci_lat_timer
= (cacheline_sz_reg
>> 8) & 0xff;
9634 tp
->pci_hdr_type
= (cacheline_sz_reg
>> 16) & 0xff;
9635 tp
->pci_bist
= (cacheline_sz_reg
>> 24) & 0xff;
9637 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
9638 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
9639 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
9640 tp
->tg3_flags2
|= TG3_FLG2_5750_PLUS
;
9642 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) ||
9643 (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
9644 tp
->tg3_flags2
|= TG3_FLG2_5705_PLUS
;
9646 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9647 tp
->tg3_flags2
|= TG3_FLG2_HW_TSO
;
9649 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
&&
9650 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5750
&&
9651 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
)
9652 tp
->tg3_flags2
|= TG3_FLG2_JUMBO_CAPABLE
;
9654 if (pci_find_capability(tp
->pdev
, PCI_CAP_ID_EXP
) != 0)
9655 tp
->tg3_flags2
|= TG3_FLG2_PCI_EXPRESS
;
9657 /* If we have an AMD 762 or VIA K8T800 chipset, write
9658 * reordering to the mailbox registers done by the host
9659 * controller can cause major troubles. We read back from
9660 * every mailbox register write to force the writes to be
9661 * posted to the chip in order.
9663 if (pci_dev_present(write_reorder_chipsets
) &&
9664 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
9665 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
9667 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
9668 tp
->pci_lat_timer
< 64) {
9669 tp
->pci_lat_timer
= 64;
9671 cacheline_sz_reg
= ((tp
->pci_cacheline_sz
& 0xff) << 0);
9672 cacheline_sz_reg
|= ((tp
->pci_lat_timer
& 0xff) << 8);
9673 cacheline_sz_reg
|= ((tp
->pci_hdr_type
& 0xff) << 16);
9674 cacheline_sz_reg
|= ((tp
->pci_bist
& 0xff) << 24);
9676 pci_write_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
9680 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
9683 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0) {
9684 tp
->tg3_flags
|= TG3_FLAG_PCIX_MODE
;
9686 /* If this is a 5700 BX chipset, and we are in PCI-X
9687 * mode, enable register write workaround.
9689 * The workaround is to use indirect register accesses
9690 * for all chip writes not to mailbox registers.
9692 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
) {
9696 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
9698 /* The chip can have it's power management PCI config
9699 * space registers clobbered due to this bug.
9700 * So explicitly force the chip into D0 here.
9702 pci_read_config_dword(tp
->pdev
, TG3PCI_PM_CTRL_STAT
,
9704 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
9705 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
9706 pci_write_config_dword(tp
->pdev
, TG3PCI_PM_CTRL_STAT
,
9709 /* Also, force SERR#/PERR# in PCI command. */
9710 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
9711 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
9712 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
9716 /* 5700 BX chips need to have their TX producer index mailboxes
9717 * written twice to workaround a bug.
9719 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
)
9720 tp
->tg3_flags
|= TG3_FLAG_TXD_MBOX_HWBUG
;
9722 /* Back to back register writes can cause problems on this chip,
9723 * the workaround is to read back all reg writes except those to
9724 * mailbox regs. See tg3_write_indirect_reg32().
9726 * PCI Express 5750_A0 rev chips need this workaround too.
9728 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
9729 ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
9730 tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
))
9731 tp
->tg3_flags
|= TG3_FLAG_5701_REG_WRITE_BUG
;
9733 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
9734 tp
->tg3_flags
|= TG3_FLAG_PCI_HIGH_SPEED
;
9735 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
9736 tp
->tg3_flags
|= TG3_FLAG_PCI_32BIT
;
9738 /* Chip-specific fixup from Broadcom driver */
9739 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
) &&
9740 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
9741 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
9742 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
9745 /* Default fast path register access methods */
9746 tp
->read32
= tg3_read32
;
9747 tp
->write32
= tg3_write32
;
9748 tp
->read32_mbox
= tg3_read32
;
9749 tp
->write32_mbox
= tg3_write32
;
9750 tp
->write32_tx_mbox
= tg3_write32
;
9751 tp
->write32_rx_mbox
= tg3_write32
;
9753 /* Various workaround register access methods */
9754 if (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
)
9755 tp
->write32
= tg3_write_indirect_reg32
;
9756 else if (tp
->tg3_flags
& TG3_FLAG_5701_REG_WRITE_BUG
)
9757 tp
->write32
= tg3_write_flush_reg32
;
9759 if ((tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
) ||
9760 (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)) {
9761 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
9762 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
9763 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
9766 if (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
) {
9767 tp
->read32
= tg3_read_indirect_reg32
;
9768 tp
->write32
= tg3_write_indirect_reg32
;
9769 tp
->read32_mbox
= tg3_read_indirect_mbox
;
9770 tp
->write32_mbox
= tg3_write_indirect_mbox
;
9771 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
9772 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
9777 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
9778 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
9779 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
9782 /* Get eeprom hw config before calling tg3_set_power_state().
9783 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
9784 * determined before calling tg3_set_power_state() so that
9785 * we know whether or not to switch out of Vaux power.
9786 * When the flag is set, it means that GPIO1 is used for eeprom
9787 * write protect and also implies that it is a LOM where GPIOs
9788 * are not used to switch power.
9790 tg3_get_eeprom_hw_cfg(tp
);
9792 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
9793 * GPIO1 driven high will bring 5700's external PHY out of reset.
9794 * It is also used as eeprom write protect on LOMs.
9796 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
9797 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
9798 (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
9799 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
9800 GRC_LCLCTRL_GPIO_OUTPUT1
);
9801 /* Unused GPIO3 must be driven as output on 5752 because there
9802 * are no pull-up resistors on unused GPIO pins.
9804 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
9805 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
9807 /* Force the chip into D0. */
9808 err
= tg3_set_power_state(tp
, 0);
9810 printk(KERN_ERR PFX
"(%s) transition to D0 failed\n",
9811 pci_name(tp
->pdev
));
9815 /* 5700 B0 chips do not support checksumming correctly due
9818 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5700_B0
)
9819 tp
->tg3_flags
|= TG3_FLAG_BROKEN_CHECKSUMS
;
9821 /* Pseudo-header checksum is done by hardware logic and not
9822 * the offload processers, so make the chip do the pseudo-
9823 * header checksums on receive. For transmit it is more
9824 * convenient to do the pseudo-header checksum in software
9825 * as Linux does that on transmit for us in all cases.
9827 tp
->tg3_flags
|= TG3_FLAG_NO_TX_PSEUDO_CSUM
;
9828 tp
->tg3_flags
&= ~TG3_FLAG_NO_RX_PSEUDO_CSUM
;
9830 /* Derive initial jumbo mode from MTU assigned in
9831 * ether_setup() via the alloc_etherdev() call
9833 if (tp
->dev
->mtu
> ETH_DATA_LEN
&&
9834 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
9835 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
9837 /* Determine WakeOnLan speed to use. */
9838 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
9839 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
9840 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
||
9841 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B2
) {
9842 tp
->tg3_flags
&= ~(TG3_FLAG_WOL_SPEED_100MB
);
9844 tp
->tg3_flags
|= TG3_FLAG_WOL_SPEED_100MB
;
9847 /* A few boards don't want Ethernet@WireSpeed phy feature */
9848 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
9849 ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
9850 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) &&
9851 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A1
)) ||
9852 (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
))
9853 tp
->tg3_flags2
|= TG3_FLG2_NO_ETH_WIRE_SPEED
;
9855 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5703_AX
||
9856 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_AX
)
9857 tp
->tg3_flags2
|= TG3_FLG2_PHY_ADC_BUG
;
9858 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
)
9859 tp
->tg3_flags2
|= TG3_FLG2_PHY_5704_A0_BUG
;
9861 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
9862 tp
->tg3_flags2
|= TG3_FLG2_PHY_BER_BUG
;
9864 tp
->coalesce_mode
= 0;
9865 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
9866 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
9867 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
9869 /* Initialize MAC MI mode, polling disabled. */
9870 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
9873 /* Initialize data/descriptor byte/word swapping. */
9874 val
= tr32(GRC_MODE
);
9875 val
&= GRC_MODE_HOST_STACKUP
;
9876 tw32(GRC_MODE
, val
| tp
->grc_mode
);
9878 tg3_switch_clocks(tp
);
9880 /* Clear this out for sanity. */
9881 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
9883 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
9885 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
9886 (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) == 0) {
9887 u32 chiprevid
= GET_CHIP_REV_ID(tp
->misc_host_ctrl
);
9889 if (chiprevid
== CHIPREV_ID_5701_A0
||
9890 chiprevid
== CHIPREV_ID_5701_B0
||
9891 chiprevid
== CHIPREV_ID_5701_B2
||
9892 chiprevid
== CHIPREV_ID_5701_B5
) {
9893 void __iomem
*sram_base
;
9895 /* Write some dummy words into the SRAM status block
9896 * area, see if it reads back correctly. If the return
9897 * value is bad, force enable the PCIX workaround.
9899 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
9901 writel(0x00000000, sram_base
);
9902 writel(0x00000000, sram_base
+ 4);
9903 writel(0xffffffff, sram_base
+ 4);
9904 if (readl(sram_base
) != 0x00000000)
9905 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
9912 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
9913 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
9915 /* Broadcom's driver says that CIOBE multisplit has a bug */
9917 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
9918 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5704CIOBE
) {
9919 tp
->tg3_flags
|= TG3_FLAG_SPLIT_MODE
;
9920 tp
->split_mode_max_reqs
= SPLIT_MODE_5704_MAX_REQ
;
9923 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
9924 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
9925 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
9926 tp
->tg3_flags2
|= TG3_FLG2_IS_5788
;
9928 if (!(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
9929 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
))
9930 tp
->tg3_flags
|= TG3_FLAG_TAGGED_STATUS
;
9931 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
9932 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
9933 HOSTCC_MODE_CLRTICK_TXBD
);
9935 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
9936 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9937 tp
->misc_host_ctrl
);
9940 /* these are limited to 10/100 only */
9941 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
9942 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
9943 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
9944 tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
9945 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901
||
9946 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901_2
||
9947 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5705F
)) ||
9948 (tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
9949 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5751F
||
9950 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5753F
)))
9951 tp
->tg3_flags
|= TG3_FLAG_10_100_ONLY
;
9953 err
= tg3_phy_probe(tp
);
9955 printk(KERN_ERR PFX
"(%s) phy probe failed, err %d\n",
9956 pci_name(tp
->pdev
), err
);
9957 /* ... but do not return immediately ... */
9960 tg3_read_partno(tp
);
9962 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
9963 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
9965 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
9966 tp
->tg3_flags
|= TG3_FLAG_USE_MI_INTERRUPT
;
9968 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
9971 /* 5700 {AX,BX} chips have a broken status block link
9972 * change bit implementation, so we must use the
9973 * status register in those cases.
9975 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
9976 tp
->tg3_flags
|= TG3_FLAG_USE_LINKCHG_REG
;
9978 tp
->tg3_flags
&= ~TG3_FLAG_USE_LINKCHG_REG
;
9980 /* The led_ctrl is set during tg3_phy_probe, here we might
9981 * have to force the link status polling mechanism based
9982 * upon subsystem IDs.
9984 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
9985 !(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
9986 tp
->tg3_flags
|= (TG3_FLAG_USE_MI_INTERRUPT
|
9987 TG3_FLAG_USE_LINKCHG_REG
);
9990 /* For all SERDES we poll the MAC status register. */
9991 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
9992 tp
->tg3_flags
|= TG3_FLAG_POLL_SERDES
;
9994 tp
->tg3_flags
&= ~TG3_FLAG_POLL_SERDES
;
9996 /* It seems all chips can get confused if TX buffers
9997 * straddle the 4GB address boundary in some cases.
9999 tp
->dev
->hard_start_xmit
= tg3_start_xmit
;
10002 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
10003 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0)
10006 /* By default, disable wake-on-lan. User can change this
10007 * using ETHTOOL_SWOL.
10009 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
10014 #ifdef CONFIG_SPARC64
10015 static int __devinit
tg3_get_macaddr_sparc(struct tg3
*tp
)
10017 struct net_device
*dev
= tp
->dev
;
10018 struct pci_dev
*pdev
= tp
->pdev
;
10019 struct pcidev_cookie
*pcp
= pdev
->sysdata
;
10022 int node
= pcp
->prom_node
;
10024 if (prom_getproplen(node
, "local-mac-address") == 6) {
10025 prom_getproperty(node
, "local-mac-address",
10027 memcpy(dev
->perm_addr
, dev
->dev_addr
, 6);
10034 static int __devinit
tg3_get_default_macaddr_sparc(struct tg3
*tp
)
10036 struct net_device
*dev
= tp
->dev
;
10038 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
10039 memcpy(dev
->perm_addr
, idprom
->id_ethaddr
, 6);
10044 static int __devinit
tg3_get_device_address(struct tg3
*tp
)
10046 struct net_device
*dev
= tp
->dev
;
10047 u32 hi
, lo
, mac_offset
;
10049 #ifdef CONFIG_SPARC64
10050 if (!tg3_get_macaddr_sparc(tp
))
10055 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
10056 !(tp
->tg3_flags
& TG3_FLG2_SUN_570X
)) ||
10057 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
10058 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
10060 if (tg3_nvram_lock(tp
))
10061 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
10063 tg3_nvram_unlock(tp
);
10066 /* First try to get it from MAC address mailbox. */
10067 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
10068 if ((hi
>> 16) == 0x484b) {
10069 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
10070 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
10072 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
10073 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
10074 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
10075 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
10076 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
10078 /* Next, try NVRAM. */
10079 else if (!(tp
->tg3_flags
& TG3_FLG2_SUN_570X
) &&
10080 !tg3_nvram_read(tp
, mac_offset
+ 0, &hi
) &&
10081 !tg3_nvram_read(tp
, mac_offset
+ 4, &lo
)) {
10082 dev
->dev_addr
[0] = ((hi
>> 16) & 0xff);
10083 dev
->dev_addr
[1] = ((hi
>> 24) & 0xff);
10084 dev
->dev_addr
[2] = ((lo
>> 0) & 0xff);
10085 dev
->dev_addr
[3] = ((lo
>> 8) & 0xff);
10086 dev
->dev_addr
[4] = ((lo
>> 16) & 0xff);
10087 dev
->dev_addr
[5] = ((lo
>> 24) & 0xff);
10089 /* Finally just fetch it out of the MAC control regs. */
10091 hi
= tr32(MAC_ADDR_0_HIGH
);
10092 lo
= tr32(MAC_ADDR_0_LOW
);
10094 dev
->dev_addr
[5] = lo
& 0xff;
10095 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
10096 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
10097 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
10098 dev
->dev_addr
[1] = hi
& 0xff;
10099 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
10102 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
10103 #ifdef CONFIG_SPARC64
10104 if (!tg3_get_default_macaddr_sparc(tp
))
10109 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
10113 #define BOUNDARY_SINGLE_CACHELINE 1
10114 #define BOUNDARY_MULTI_CACHELINE 2
10116 static u32 __devinit
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
10118 int cacheline_size
;
10122 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
10124 cacheline_size
= 1024;
10126 cacheline_size
= (int) byte
* 4;
10128 /* On 5703 and later chips, the boundary bits have no
10131 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
10132 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
10133 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
10136 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10137 goal
= BOUNDARY_MULTI_CACHELINE
;
10139 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10140 goal
= BOUNDARY_SINGLE_CACHELINE
;
10149 /* PCI controllers on most RISC systems tend to disconnect
10150 * when a device tries to burst across a cache-line boundary.
10151 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10153 * Unfortunately, for PCI-E there are only limited
10154 * write-side controls for this, and thus for reads
10155 * we will still get the disconnects. We'll also waste
10156 * these PCI cycles for both read and write for chips
10157 * other than 5700 and 5701 which do not implement the
10160 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) &&
10161 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
10162 switch (cacheline_size
) {
10167 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10168 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
10169 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
10171 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
10172 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
10177 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
10178 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
10182 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
10183 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
10186 } else if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10187 switch (cacheline_size
) {
10191 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10192 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
10193 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
10199 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
10200 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
10204 switch (cacheline_size
) {
10206 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10207 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
10208 DMA_RWCTRL_WRITE_BNDRY_16
);
10213 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10214 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
10215 DMA_RWCTRL_WRITE_BNDRY_32
);
10220 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10221 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
10222 DMA_RWCTRL_WRITE_BNDRY_64
);
10227 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10228 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
10229 DMA_RWCTRL_WRITE_BNDRY_128
);
10234 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
10235 DMA_RWCTRL_WRITE_BNDRY_256
);
10238 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
10239 DMA_RWCTRL_WRITE_BNDRY_512
);
10243 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
10244 DMA_RWCTRL_WRITE_BNDRY_1024
);
10253 static int __devinit
tg3_do_test_dma(struct tg3
*tp
, u32
*buf
, dma_addr_t buf_dma
, int size
, int to_device
)
10255 struct tg3_internal_buffer_desc test_desc
;
10256 u32 sram_dma_descs
;
10259 sram_dma_descs
= NIC_SRAM_DMA_DESC_POOL_BASE
;
10261 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
, 0);
10262 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
, 0);
10263 tw32(RDMAC_STATUS
, 0);
10264 tw32(WDMAC_STATUS
, 0);
10266 tw32(BUFMGR_MODE
, 0);
10267 tw32(FTQ_RESET
, 0);
10269 test_desc
.addr_hi
= ((u64
) buf_dma
) >> 32;
10270 test_desc
.addr_lo
= buf_dma
& 0xffffffff;
10271 test_desc
.nic_mbuf
= 0x00002100;
10272 test_desc
.len
= size
;
10275 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10276 * the *second* time the tg3 driver was getting loaded after an
10279 * Broadcom tells me:
10280 * ...the DMA engine is connected to the GRC block and a DMA
10281 * reset may affect the GRC block in some unpredictable way...
10282 * The behavior of resets to individual blocks has not been tested.
10284 * Broadcom noted the GRC reset will also reset all sub-components.
10287 test_desc
.cqid_sqid
= (13 << 8) | 2;
10289 tw32_f(RDMAC_MODE
, RDMAC_MODE_ENABLE
);
10292 test_desc
.cqid_sqid
= (16 << 8) | 7;
10294 tw32_f(WDMAC_MODE
, WDMAC_MODE_ENABLE
);
10297 test_desc
.flags
= 0x00000005;
10299 for (i
= 0; i
< (sizeof(test_desc
) / sizeof(u32
)); i
++) {
10302 val
= *(((u32
*)&test_desc
) + i
);
10303 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
,
10304 sram_dma_descs
+ (i
* sizeof(u32
)));
10305 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
10307 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
10310 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ
, sram_dma_descs
);
10312 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ
, sram_dma_descs
);
10316 for (i
= 0; i
< 40; i
++) {
10320 val
= tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
);
10322 val
= tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
);
10323 if ((val
& 0xffff) == sram_dma_descs
) {
10334 #define TEST_BUFFER_SIZE 0x2000
10336 static int __devinit
tg3_test_dma(struct tg3
*tp
)
10338 dma_addr_t buf_dma
;
10339 u32
*buf
, saved_dma_rwctrl
;
10342 buf
= pci_alloc_consistent(tp
->pdev
, TEST_BUFFER_SIZE
, &buf_dma
);
10348 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
10349 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
10351 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
10353 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10354 /* DMA read watermark not used on PCIE */
10355 tp
->dma_rwctrl
|= 0x00180000;
10356 } else if (!(tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
10357 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
10358 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
10359 tp
->dma_rwctrl
|= 0x003f0000;
10361 tp
->dma_rwctrl
|= 0x003f000f;
10363 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
10364 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
10365 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
10367 /* If the 5704 is behind the EPB bridge, we can
10368 * do the less restrictive ONE_DMA workaround for
10369 * better performance.
10371 if ((tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) &&
10372 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
10373 tp
->dma_rwctrl
|= 0x8000;
10374 else if (ccval
== 0x6 || ccval
== 0x7)
10375 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
10377 /* Set bit 23 to enable PCIX hw bug fix */
10378 tp
->dma_rwctrl
|= 0x009f0000;
10379 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
10380 /* 5780 always in PCIX mode */
10381 tp
->dma_rwctrl
|= 0x00144000;
10382 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
10383 /* 5714 always in PCIX mode */
10384 tp
->dma_rwctrl
|= 0x00148000;
10386 tp
->dma_rwctrl
|= 0x001b000f;
10390 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
10391 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
10392 tp
->dma_rwctrl
&= 0xfffffff0;
10394 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10395 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
10396 /* Remove this if it causes problems for some boards. */
10397 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
10399 /* On 5700/5701 chips, we need to set this bit.
10400 * Otherwise the chip will issue cacheline transactions
10401 * to streamable DMA memory with not all the byte
10402 * enables turned on. This is an error on several
10403 * RISC PCI controllers, in particular sparc64.
10405 * On 5703/5704 chips, this bit has been reassigned
10406 * a different meaning. In particular, it is used
10407 * on those chips to enable a PCI-X workaround.
10409 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
10412 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
10415 /* Unneeded, already done by tg3_get_invariants. */
10416 tg3_switch_clocks(tp
);
10420 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
10421 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
10424 /* It is best to perform DMA test with maximum write burst size
10425 * to expose the 5700/5701 write DMA bug.
10427 saved_dma_rwctrl
= tp
->dma_rwctrl
;
10428 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
10429 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
10434 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
10437 /* Send the buffer to the chip. */
10438 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 1);
10440 printk(KERN_ERR
"tg3_test_dma() Write the buffer failed %d\n", ret
);
10445 /* validate data reached card RAM correctly. */
10446 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
10448 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
10449 if (le32_to_cpu(val
) != p
[i
]) {
10450 printk(KERN_ERR
" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val
, i
);
10451 /* ret = -ENODEV here? */
10456 /* Now read it back. */
10457 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 0);
10459 printk(KERN_ERR
"tg3_test_dma() Read the buffer failed %d\n", ret
);
10465 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
10469 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
10470 DMA_RWCTRL_WRITE_BNDRY_16
) {
10471 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
10472 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
10473 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
10476 printk(KERN_ERR
"tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p
[i
], i
);
10482 if (i
== (TEST_BUFFER_SIZE
/ sizeof(u32
))) {
10488 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
10489 DMA_RWCTRL_WRITE_BNDRY_16
) {
10490 static struct pci_device_id dma_wait_state_chipsets
[] = {
10491 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
,
10492 PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
10496 /* DMA test passed without adjusting DMA boundary,
10497 * now look for chipsets that are known to expose the
10498 * DMA bug without failing the test.
10500 if (pci_dev_present(dma_wait_state_chipsets
)) {
10501 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
10502 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
10505 /* Safe to use the calculated DMA boundary. */
10506 tp
->dma_rwctrl
= saved_dma_rwctrl
;
10508 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
10512 pci_free_consistent(tp
->pdev
, TEST_BUFFER_SIZE
, buf
, buf_dma
);
10517 static void __devinit
tg3_init_link_config(struct tg3
*tp
)
10519 tp
->link_config
.advertising
=
10520 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
10521 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
10522 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
10523 ADVERTISED_Autoneg
| ADVERTISED_MII
);
10524 tp
->link_config
.speed
= SPEED_INVALID
;
10525 tp
->link_config
.duplex
= DUPLEX_INVALID
;
10526 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
10527 netif_carrier_off(tp
->dev
);
10528 tp
->link_config
.active_speed
= SPEED_INVALID
;
10529 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
10530 tp
->link_config
.phy_is_low_power
= 0;
10531 tp
->link_config
.orig_speed
= SPEED_INVALID
;
10532 tp
->link_config
.orig_duplex
= DUPLEX_INVALID
;
10533 tp
->link_config
.orig_autoneg
= AUTONEG_INVALID
;
10536 static void __devinit
tg3_init_bufmgr_config(struct tg3
*tp
)
10538 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
10539 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
10540 DEFAULT_MB_RDMA_LOW_WATER_5705
;
10541 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
10542 DEFAULT_MB_MACRX_LOW_WATER_5705
;
10543 tp
->bufmgr_config
.mbuf_high_water
=
10544 DEFAULT_MB_HIGH_WATER_5705
;
10546 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
10547 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
10548 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
10549 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
10550 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
10551 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
10553 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
10554 DEFAULT_MB_RDMA_LOW_WATER
;
10555 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
10556 DEFAULT_MB_MACRX_LOW_WATER
;
10557 tp
->bufmgr_config
.mbuf_high_water
=
10558 DEFAULT_MB_HIGH_WATER
;
10560 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
10561 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
10562 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
10563 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
10564 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
10565 DEFAULT_MB_HIGH_WATER_JUMBO
;
10568 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
10569 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
10572 static char * __devinit
tg3_phy_string(struct tg3
*tp
)
10574 switch (tp
->phy_id
& PHY_ID_MASK
) {
10575 case PHY_ID_BCM5400
: return "5400";
10576 case PHY_ID_BCM5401
: return "5401";
10577 case PHY_ID_BCM5411
: return "5411";
10578 case PHY_ID_BCM5701
: return "5701";
10579 case PHY_ID_BCM5703
: return "5703";
10580 case PHY_ID_BCM5704
: return "5704";
10581 case PHY_ID_BCM5705
: return "5705";
10582 case PHY_ID_BCM5750
: return "5750";
10583 case PHY_ID_BCM5752
: return "5752";
10584 case PHY_ID_BCM5714
: return "5714";
10585 case PHY_ID_BCM5780
: return "5780";
10586 case PHY_ID_BCM8002
: return "8002/serdes";
10587 case 0: return "serdes";
10588 default: return "unknown";
10592 static char * __devinit
tg3_bus_string(struct tg3
*tp
, char *str
)
10594 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10595 strcpy(str
, "PCI Express");
10597 } else if (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) {
10598 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
) & 0x1f;
10600 strcpy(str
, "PCIX:");
10602 if ((clock_ctrl
== 7) ||
10603 ((tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
) ==
10604 GRC_MISC_CFG_BOARD_ID_5704CIOBE
))
10605 strcat(str
, "133MHz");
10606 else if (clock_ctrl
== 0)
10607 strcat(str
, "33MHz");
10608 else if (clock_ctrl
== 2)
10609 strcat(str
, "50MHz");
10610 else if (clock_ctrl
== 4)
10611 strcat(str
, "66MHz");
10612 else if (clock_ctrl
== 6)
10613 strcat(str
, "100MHz");
10615 strcpy(str
, "PCI:");
10616 if (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
)
10617 strcat(str
, "66MHz");
10619 strcat(str
, "33MHz");
10621 if (tp
->tg3_flags
& TG3_FLAG_PCI_32BIT
)
10622 strcat(str
, ":32-bit");
10624 strcat(str
, ":64-bit");
10628 static struct pci_dev
* __devinit
tg3_find_peer(struct tg3
*tp
)
10630 struct pci_dev
*peer
;
10631 unsigned int func
, devnr
= tp
->pdev
->devfn
& ~7;
10633 for (func
= 0; func
< 8; func
++) {
10634 peer
= pci_get_slot(tp
->pdev
->bus
, devnr
| func
);
10635 if (peer
&& peer
!= tp
->pdev
)
10639 /* 5704 can be configured in single-port mode, set peer to
10640 * tp->pdev in that case.
10648 * We don't need to keep the refcount elevated; there's no way
10649 * to remove one half of this device without removing the other
10656 static void __devinit
tg3_init_coal(struct tg3
*tp
)
10658 struct ethtool_coalesce
*ec
= &tp
->coal
;
10660 memset(ec
, 0, sizeof(*ec
));
10661 ec
->cmd
= ETHTOOL_GCOALESCE
;
10662 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
10663 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
10664 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
10665 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
10666 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
10667 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
10668 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
10669 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
10670 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
10672 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
10673 HOSTCC_MODE_CLRTICK_TXBD
)) {
10674 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
10675 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
10676 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
10677 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
10680 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
10681 ec
->rx_coalesce_usecs_irq
= 0;
10682 ec
->tx_coalesce_usecs_irq
= 0;
10683 ec
->stats_block_coalesce_usecs
= 0;
10687 static int __devinit
tg3_init_one(struct pci_dev
*pdev
,
10688 const struct pci_device_id
*ent
)
10690 static int tg3_version_printed
= 0;
10691 unsigned long tg3reg_base
, tg3reg_len
;
10692 struct net_device
*dev
;
10694 int i
, err
, pm_cap
;
10696 u64 dma_mask
, persist_dma_mask
;
10698 if (tg3_version_printed
++ == 0)
10699 printk(KERN_INFO
"%s", version
);
10701 err
= pci_enable_device(pdev
);
10703 printk(KERN_ERR PFX
"Cannot enable PCI device, "
10708 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
10709 printk(KERN_ERR PFX
"Cannot find proper PCI device "
10710 "base address, aborting.\n");
10712 goto err_out_disable_pdev
;
10715 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
10717 printk(KERN_ERR PFX
"Cannot obtain PCI resources, "
10719 goto err_out_disable_pdev
;
10722 pci_set_master(pdev
);
10724 /* Find power-management capability. */
10725 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
10727 printk(KERN_ERR PFX
"Cannot find PowerManagement capability, "
10730 goto err_out_free_res
;
10733 tg3reg_base
= pci_resource_start(pdev
, 0);
10734 tg3reg_len
= pci_resource_len(pdev
, 0);
10736 dev
= alloc_etherdev(sizeof(*tp
));
10738 printk(KERN_ERR PFX
"Etherdev alloc failed, aborting.\n");
10740 goto err_out_free_res
;
10743 SET_MODULE_OWNER(dev
);
10744 SET_NETDEV_DEV(dev
, &pdev
->dev
);
10746 dev
->features
|= NETIF_F_LLTX
;
10747 #if TG3_VLAN_TAG_USED
10748 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
10749 dev
->vlan_rx_register
= tg3_vlan_rx_register
;
10750 dev
->vlan_rx_kill_vid
= tg3_vlan_rx_kill_vid
;
10753 tp
= netdev_priv(dev
);
10756 tp
->pm_cap
= pm_cap
;
10757 tp
->mac_mode
= TG3_DEF_MAC_MODE
;
10758 tp
->rx_mode
= TG3_DEF_RX_MODE
;
10759 tp
->tx_mode
= TG3_DEF_TX_MODE
;
10760 tp
->mi_mode
= MAC_MI_MODE_BASE
;
10762 tp
->msg_enable
= tg3_debug
;
10764 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
10766 /* The word/byte swap controls here control register access byte
10767 * swapping. DMA data byte swapping is controlled in the GRC_MODE
10770 tp
->misc_host_ctrl
=
10771 MISC_HOST_CTRL_MASK_PCI_INT
|
10772 MISC_HOST_CTRL_WORD_SWAP
|
10773 MISC_HOST_CTRL_INDIR_ACCESS
|
10774 MISC_HOST_CTRL_PCISTATE_RW
;
10776 /* The NONFRM (non-frame) byte/word swap controls take effect
10777 * on descriptor entries, anything which isn't packet data.
10779 * The StrongARM chips on the board (one for tx, one for rx)
10780 * are running in big-endian mode.
10782 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
10783 GRC_MODE_WSWAP_NONFRM_DATA
);
10784 #ifdef __BIG_ENDIAN
10785 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
10787 spin_lock_init(&tp
->lock
);
10788 spin_lock_init(&tp
->tx_lock
);
10789 spin_lock_init(&tp
->indirect_lock
);
10790 INIT_WORK(&tp
->reset_task
, tg3_reset_task
, tp
);
10792 tp
->regs
= ioremap_nocache(tg3reg_base
, tg3reg_len
);
10793 if (tp
->regs
== 0UL) {
10794 printk(KERN_ERR PFX
"Cannot map device registers, "
10797 goto err_out_free_dev
;
10800 tg3_init_link_config(tp
);
10802 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
10803 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
10804 tp
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
10806 dev
->open
= tg3_open
;
10807 dev
->stop
= tg3_close
;
10808 dev
->get_stats
= tg3_get_stats
;
10809 dev
->set_multicast_list
= tg3_set_rx_mode
;
10810 dev
->set_mac_address
= tg3_set_mac_addr
;
10811 dev
->do_ioctl
= tg3_ioctl
;
10812 dev
->tx_timeout
= tg3_tx_timeout
;
10813 dev
->poll
= tg3_poll
;
10814 dev
->ethtool_ops
= &tg3_ethtool_ops
;
10816 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
10817 dev
->change_mtu
= tg3_change_mtu
;
10818 dev
->irq
= pdev
->irq
;
10819 #ifdef CONFIG_NET_POLL_CONTROLLER
10820 dev
->poll_controller
= tg3_poll_controller
;
10823 err
= tg3_get_invariants(tp
);
10825 printk(KERN_ERR PFX
"Problem fetching invariants of chip, "
10827 goto err_out_iounmap
;
10830 /* The EPB bridge inside 5714, 5715, and 5780 and any
10831 * device behind the EPB cannot support DMA addresses > 40-bit.
10832 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
10833 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
10834 * do DMA address check in tg3_start_xmit().
10836 if (tp
->tg3_flags2
& TG3_FLG2_IS_5788
)
10837 persist_dma_mask
= dma_mask
= DMA_32BIT_MASK
;
10838 else if (tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) {
10839 persist_dma_mask
= dma_mask
= DMA_40BIT_MASK
;
10840 #ifdef CONFIG_HIGHMEM
10841 dma_mask
= DMA_64BIT_MASK
;
10844 persist_dma_mask
= dma_mask
= DMA_64BIT_MASK
;
10846 /* Configure DMA attributes. */
10847 if (dma_mask
> DMA_32BIT_MASK
) {
10848 err
= pci_set_dma_mask(pdev
, dma_mask
);
10850 dev
->features
|= NETIF_F_HIGHDMA
;
10851 err
= pci_set_consistent_dma_mask(pdev
,
10854 printk(KERN_ERR PFX
"Unable to obtain 64 bit "
10855 "DMA for consistent allocations\n");
10856 goto err_out_iounmap
;
10860 if (err
|| dma_mask
== DMA_32BIT_MASK
) {
10861 err
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
);
10863 printk(KERN_ERR PFX
"No usable DMA configuration, "
10865 goto err_out_iounmap
;
10869 tg3_init_bufmgr_config(tp
);
10871 #if TG3_TSO_SUPPORT != 0
10872 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
10873 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
10875 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10876 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
10877 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
||
10878 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
10879 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
10881 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
10884 /* TSO is off by default, user can enable using ethtool. */
10886 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)
10887 dev
->features
|= NETIF_F_TSO
;
10892 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
&&
10893 !(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) &&
10894 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
)) {
10895 tp
->tg3_flags2
|= TG3_FLG2_MAX_RXPEND_64
;
10896 tp
->rx_pending
= 63;
10899 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
10900 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
))
10901 tp
->pdev_peer
= tg3_find_peer(tp
);
10903 err
= tg3_get_device_address(tp
);
10905 printk(KERN_ERR PFX
"Could not obtain valid ethernet address, "
10907 goto err_out_iounmap
;
10911 * Reset chip in case UNDI or EFI driver did not shutdown
10912 * DMA self test will enable WDMAC and we'll see (spurious)
10913 * pending DMA on the PCI bus at that point.
10915 if ((tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
) ||
10916 (tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
10917 pci_save_state(tp
->pdev
);
10918 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
10919 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
10922 err
= tg3_test_dma(tp
);
10924 printk(KERN_ERR PFX
"DMA engine test failed, aborting.\n");
10925 goto err_out_iounmap
;
10928 /* Tigon3 can do ipv4 only... and some chips have buggy
10931 if ((tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) == 0) {
10932 dev
->features
|= NETIF_F_SG
| NETIF_F_IP_CSUM
;
10933 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
10935 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
10937 /* flow control autonegotiation is default behavior */
10938 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
10942 /* Now that we have fully setup the chip, save away a snapshot
10943 * of the PCI config space. We need to restore this after
10944 * GRC_MISC_CFG core clock resets and some resume events.
10946 pci_save_state(tp
->pdev
);
10948 err
= register_netdev(dev
);
10950 printk(KERN_ERR PFX
"Cannot register net device, "
10952 goto err_out_iounmap
;
10955 pci_set_drvdata(pdev
, dev
);
10957 printk(KERN_INFO
"%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
10959 tp
->board_part_number
,
10960 tp
->pci_chip_rev_id
,
10961 tg3_phy_string(tp
),
10962 tg3_bus_string(tp
, str
),
10963 (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) ? "10/100" : "10/100/1000");
10965 for (i
= 0; i
< 6; i
++)
10966 printk("%2.2x%c", dev
->dev_addr
[i
],
10967 i
== 5 ? '\n' : ':');
10969 printk(KERN_INFO
"%s: RXcsums[%d] LinkChgREG[%d] "
10970 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
10973 (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0,
10974 (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) != 0,
10975 (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
) != 0,
10976 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0,
10977 (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
) != 0,
10978 (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
) == 0,
10979 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) != 0);
10980 printk(KERN_INFO
"%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
10981 dev
->name
, tp
->dma_rwctrl
,
10982 (pdev
->dma_mask
== DMA_32BIT_MASK
) ? 32 :
10983 (((u64
) pdev
->dma_mask
== DMA_40BIT_MASK
) ? 40 : 64));
10997 pci_release_regions(pdev
);
10999 err_out_disable_pdev
:
11000 pci_disable_device(pdev
);
11001 pci_set_drvdata(pdev
, NULL
);
11005 static void __devexit
tg3_remove_one(struct pci_dev
*pdev
)
11007 struct net_device
*dev
= pci_get_drvdata(pdev
);
11010 struct tg3
*tp
= netdev_priv(dev
);
11012 flush_scheduled_work();
11013 unregister_netdev(dev
);
11019 pci_release_regions(pdev
);
11020 pci_disable_device(pdev
);
11021 pci_set_drvdata(pdev
, NULL
);
11025 static int tg3_suspend(struct pci_dev
*pdev
, pm_message_t state
)
11027 struct net_device
*dev
= pci_get_drvdata(pdev
);
11028 struct tg3
*tp
= netdev_priv(dev
);
11031 if (!netif_running(dev
))
11034 flush_scheduled_work();
11035 tg3_netif_stop(tp
);
11037 del_timer_sync(&tp
->timer
);
11039 tg3_full_lock(tp
, 1);
11040 tg3_disable_ints(tp
);
11041 tg3_full_unlock(tp
);
11043 netif_device_detach(dev
);
11045 tg3_full_lock(tp
, 0);
11046 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11047 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
11048 tg3_full_unlock(tp
);
11050 err
= tg3_set_power_state(tp
, pci_choose_state(pdev
, state
));
11052 tg3_full_lock(tp
, 0);
11054 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
11057 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
11058 add_timer(&tp
->timer
);
11060 netif_device_attach(dev
);
11061 tg3_netif_start(tp
);
11063 tg3_full_unlock(tp
);
11069 static int tg3_resume(struct pci_dev
*pdev
)
11071 struct net_device
*dev
= pci_get_drvdata(pdev
);
11072 struct tg3
*tp
= netdev_priv(dev
);
11075 if (!netif_running(dev
))
11078 pci_restore_state(tp
->pdev
);
11080 err
= tg3_set_power_state(tp
, 0);
11084 netif_device_attach(dev
);
11086 tg3_full_lock(tp
, 0);
11088 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
11091 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
11092 add_timer(&tp
->timer
);
11094 tg3_netif_start(tp
);
11096 tg3_full_unlock(tp
);
11101 static struct pci_driver tg3_driver
= {
11102 .name
= DRV_MODULE_NAME
,
11103 .id_table
= tg3_pci_tbl
,
11104 .probe
= tg3_init_one
,
11105 .remove
= __devexit_p(tg3_remove_one
),
11106 .suspend
= tg3_suspend
,
11107 .resume
= tg3_resume
11110 static int __init
tg3_init(void)
11112 return pci_module_init(&tg3_driver
);
11115 static void __exit
tg3_cleanup(void)
11117 pci_unregister_driver(&tg3_driver
);
11120 module_init(tg3_init
);
11121 module_exit(tg3_cleanup
);