2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
44 #include <asm/system.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
58 #define TG3_VLAN_TAG_USED 0
62 #define TG3_TSO_SUPPORT 1
64 #define TG3_TSO_SUPPORT 0
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.65"
72 #define DRV_MODULE_RELDATE "August 07, 2006"
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
106 /* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et
108 * al. operations are done with shifts and masks instead of with
109 * hw multiply/modulo instructions. Another solution would be to
110 * replace things like '% foo' with '& (foo - 1)'.
112 #define TG3_RX_RCB_RING_SIZE(tp) \
113 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115 #define TG3_TX_RING_SIZE 512
116 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123 TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137 #define TG3_NUM_TEST 6
139 static char version
[] __devinitdata
=
140 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
142 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
143 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
144 MODULE_LICENSE("GPL");
145 MODULE_VERSION(DRV_MODULE_VERSION
);
147 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
148 module_param(tg3_debug
, int, 0);
149 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
151 static struct pci_device_id tg3_pci_tbl
[] = {
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5720
)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5722
)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750
)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750M
)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754
)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754M
)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755
)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755M
)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5756
)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5786
)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787
)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787M
)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906
)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5906M
)},
204 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
)},
205 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
)},
206 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
)},
207 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
)},
208 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
)},
209 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
)},
210 {PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
)},
214 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
216 static const struct {
217 const char string
[ETH_GSTRING_LEN
];
218 } ethtool_stats_keys
[TG3_NUM_STATS
] = {
221 { "rx_ucast_packets" },
222 { "rx_mcast_packets" },
223 { "rx_bcast_packets" },
225 { "rx_align_errors" },
226 { "rx_xon_pause_rcvd" },
227 { "rx_xoff_pause_rcvd" },
228 { "rx_mac_ctrl_rcvd" },
229 { "rx_xoff_entered" },
230 { "rx_frame_too_long_errors" },
232 { "rx_undersize_packets" },
233 { "rx_in_length_errors" },
234 { "rx_out_length_errors" },
235 { "rx_64_or_less_octet_packets" },
236 { "rx_65_to_127_octet_packets" },
237 { "rx_128_to_255_octet_packets" },
238 { "rx_256_to_511_octet_packets" },
239 { "rx_512_to_1023_octet_packets" },
240 { "rx_1024_to_1522_octet_packets" },
241 { "rx_1523_to_2047_octet_packets" },
242 { "rx_2048_to_4095_octet_packets" },
243 { "rx_4096_to_8191_octet_packets" },
244 { "rx_8192_to_9022_octet_packets" },
251 { "tx_flow_control" },
253 { "tx_single_collisions" },
254 { "tx_mult_collisions" },
256 { "tx_excessive_collisions" },
257 { "tx_late_collisions" },
258 { "tx_collide_2times" },
259 { "tx_collide_3times" },
260 { "tx_collide_4times" },
261 { "tx_collide_5times" },
262 { "tx_collide_6times" },
263 { "tx_collide_7times" },
264 { "tx_collide_8times" },
265 { "tx_collide_9times" },
266 { "tx_collide_10times" },
267 { "tx_collide_11times" },
268 { "tx_collide_12times" },
269 { "tx_collide_13times" },
270 { "tx_collide_14times" },
271 { "tx_collide_15times" },
272 { "tx_ucast_packets" },
273 { "tx_mcast_packets" },
274 { "tx_bcast_packets" },
275 { "tx_carrier_sense_errors" },
279 { "dma_writeq_full" },
280 { "dma_write_prioq_full" },
284 { "rx_threshold_hit" },
286 { "dma_readq_full" },
287 { "dma_read_prioq_full" },
288 { "tx_comp_queue_full" },
290 { "ring_set_send_prod_index" },
291 { "ring_status_update" },
293 { "nic_avoided_irqs" },
294 { "nic_tx_threshold_hit" }
297 static const struct {
298 const char string
[ETH_GSTRING_LEN
];
299 } ethtool_test_keys
[TG3_NUM_TEST
] = {
300 { "nvram test (online) " },
301 { "link test (online) " },
302 { "register test (offline)" },
303 { "memory test (offline)" },
304 { "loopback test (offline)" },
305 { "interrupt test (offline)" },
308 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
310 writel(val
, tp
->regs
+ off
);
313 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
315 return (readl(tp
->regs
+ off
));
318 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
322 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
323 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
324 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
325 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
328 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
330 writel(val
, tp
->regs
+ off
);
331 readl(tp
->regs
+ off
);
334 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
339 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
340 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
341 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
342 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
346 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
350 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
351 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
352 TG3_64BIT_REG_LOW
, val
);
355 if (off
== (MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
)) {
356 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
357 TG3_64BIT_REG_LOW
, val
);
361 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
362 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
363 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
364 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
366 /* In indirect mode when disabling interrupts, we also need
367 * to clear the interrupt bit in the GRC local ctrl register.
369 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
371 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
372 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
376 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
381 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
382 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
383 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
384 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
388 /* usec_wait specifies the wait time in usec when writing to certain registers
389 * where it is unsafe to read back the register without some delay.
390 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
391 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
393 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
395 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) ||
396 (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
397 /* Non-posted methods */
398 tp
->write32(tp
, off
, val
);
401 tg3_write32(tp
, off
, val
);
406 /* Wait again after the read for the posted method to guarantee that
407 * the wait time is met.
413 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
415 tp
->write32_mbox(tp
, off
, val
);
416 if (!(tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
) &&
417 !(tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
418 tp
->read32_mbox(tp
, off
);
421 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
423 void __iomem
*mbox
= tp
->regs
+ off
;
425 if (tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
)
427 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
431 static u32
tg3_read32_mbox_5906(struct tg3
*tp
, u32 off
)
433 return (readl(tp
->regs
+ off
+ GRCMBOX_BASE
));
436 static void tg3_write32_mbox_5906(struct tg3
*tp
, u32 off
, u32 val
)
438 writel(val
, tp
->regs
+ off
+ GRCMBOX_BASE
);
441 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
442 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
443 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
444 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
445 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
447 #define tw32(reg,val) tp->write32(tp, reg, val)
448 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
449 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
450 #define tr32(reg) tp->read32(tp, reg)
452 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
456 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) &&
457 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
))
460 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
461 if (tp
->tg3_flags
& TG3_FLAG_SRAM_USE_CONFIG
) {
462 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
463 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
465 /* Always leave this as zero. */
466 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
468 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
469 tw32_f(TG3PCI_MEM_WIN_DATA
, val
);
471 /* Always leave this as zero. */
472 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
474 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
477 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
481 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) &&
482 (off
>= NIC_SRAM_STATS_BLK
) && (off
< NIC_SRAM_TX_BUFFER_DESC
)) {
487 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
488 if (tp
->tg3_flags
& TG3_FLAG_SRAM_USE_CONFIG
) {
489 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
490 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
492 /* Always leave this as zero. */
493 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
495 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
496 *val
= tr32(TG3PCI_MEM_WIN_DATA
);
498 /* Always leave this as zero. */
499 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
501 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
504 static void tg3_disable_ints(struct tg3
*tp
)
506 tw32(TG3PCI_MISC_HOST_CTRL
,
507 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
508 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
511 static inline void tg3_cond_int(struct tg3
*tp
)
513 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
514 (tp
->hw_status
->status
& SD_STATUS_UPDATED
))
515 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
517 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
518 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
521 static void tg3_enable_ints(struct tg3
*tp
)
526 tw32(TG3PCI_MISC_HOST_CTRL
,
527 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
528 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
529 (tp
->last_tag
<< 24));
530 if (tp
->tg3_flags2
& TG3_FLG2_1SHOT_MSI
)
531 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
532 (tp
->last_tag
<< 24));
536 static inline unsigned int tg3_has_work(struct tg3
*tp
)
538 struct tg3_hw_status
*sblk
= tp
->hw_status
;
539 unsigned int work_exists
= 0;
541 /* check for phy events */
542 if (!(tp
->tg3_flags
&
543 (TG3_FLAG_USE_LINKCHG_REG
|
544 TG3_FLAG_POLL_SERDES
))) {
545 if (sblk
->status
& SD_STATUS_LINK_CHG
)
548 /* check for RX/TX work to do */
549 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
||
550 sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
)
557 * similar to tg3_enable_ints, but it accurately determines whether there
558 * is new work pending and can return without flushing the PIO write
559 * which reenables interrupts
561 static void tg3_restart_ints(struct tg3
*tp
)
563 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
567 /* When doing tagged status, this work check is unnecessary.
568 * The last_tag we write above tells the chip which piece of
569 * work we've completed.
571 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
573 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
574 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
577 static inline void tg3_netif_stop(struct tg3
*tp
)
579 tp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
580 netif_poll_disable(tp
->dev
);
581 netif_tx_disable(tp
->dev
);
584 static inline void tg3_netif_start(struct tg3
*tp
)
586 netif_wake_queue(tp
->dev
);
587 /* NOTE: unconditional netif_wake_queue is only appropriate
588 * so long as all callers are assured to have free tx slots
589 * (such as after tg3_init_hw)
591 netif_poll_enable(tp
->dev
);
592 tp
->hw_status
->status
|= SD_STATUS_UPDATED
;
596 static void tg3_switch_clocks(struct tg3
*tp
)
598 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
601 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
604 orig_clock_ctrl
= clock_ctrl
;
605 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
606 CLOCK_CTRL_CLKRUN_OENABLE
|
608 tp
->pci_clock_ctrl
= clock_ctrl
;
610 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
611 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
612 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
613 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
615 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
616 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
618 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
620 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
621 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
624 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
627 #define PHY_BUSY_LOOPS 5000
629 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
635 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
637 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
643 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
644 MI_COM_PHY_ADDR_MASK
);
645 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
646 MI_COM_REG_ADDR_MASK
);
647 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
649 tw32_f(MAC_MI_COM
, frame_val
);
651 loops
= PHY_BUSY_LOOPS
;
654 frame_val
= tr32(MAC_MI_COM
);
656 if ((frame_val
& MI_COM_BUSY
) == 0) {
658 frame_val
= tr32(MAC_MI_COM
);
666 *val
= frame_val
& MI_COM_DATA_MASK
;
670 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
671 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
678 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
684 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
&&
685 (reg
== MII_TG3_CTRL
|| reg
== MII_TG3_AUX_CTRL
))
688 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
690 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
694 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
695 MI_COM_PHY_ADDR_MASK
);
696 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
697 MI_COM_REG_ADDR_MASK
);
698 frame_val
|= (val
& MI_COM_DATA_MASK
);
699 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
701 tw32_f(MAC_MI_COM
, frame_val
);
703 loops
= PHY_BUSY_LOOPS
;
706 frame_val
= tr32(MAC_MI_COM
);
707 if ((frame_val
& MI_COM_BUSY
) == 0) {
709 frame_val
= tr32(MAC_MI_COM
);
719 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
720 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
727 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
731 if (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
)
734 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x7007) &&
735 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
))
736 tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
737 (val
| (1 << 15) | (1 << 4)));
740 static int tg3_bmcr_reset(struct tg3
*tp
)
745 /* OK, reset it, and poll the BMCR_RESET bit until it
746 * clears or we time out.
748 phy_control
= BMCR_RESET
;
749 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
755 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
759 if ((phy_control
& BMCR_RESET
) == 0) {
771 static int tg3_wait_macro_done(struct tg3
*tp
)
778 if (!tg3_readphy(tp
, 0x16, &tmp32
)) {
779 if ((tmp32
& 0x1000) == 0)
789 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
791 static const u32 test_pat
[4][6] = {
792 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
793 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
794 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
795 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
799 for (chan
= 0; chan
< 4; chan
++) {
802 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
803 (chan
* 0x2000) | 0x0200);
804 tg3_writephy(tp
, 0x16, 0x0002);
806 for (i
= 0; i
< 6; i
++)
807 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
810 tg3_writephy(tp
, 0x16, 0x0202);
811 if (tg3_wait_macro_done(tp
)) {
816 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
817 (chan
* 0x2000) | 0x0200);
818 tg3_writephy(tp
, 0x16, 0x0082);
819 if (tg3_wait_macro_done(tp
)) {
824 tg3_writephy(tp
, 0x16, 0x0802);
825 if (tg3_wait_macro_done(tp
)) {
830 for (i
= 0; i
< 6; i
+= 2) {
833 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
834 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
835 tg3_wait_macro_done(tp
)) {
841 if (low
!= test_pat
[chan
][i
] ||
842 high
!= test_pat
[chan
][i
+1]) {
843 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
844 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
845 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
855 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
859 for (chan
= 0; chan
< 4; chan
++) {
862 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
863 (chan
* 0x2000) | 0x0200);
864 tg3_writephy(tp
, 0x16, 0x0002);
865 for (i
= 0; i
< 6; i
++)
866 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
867 tg3_writephy(tp
, 0x16, 0x0202);
868 if (tg3_wait_macro_done(tp
))
875 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
877 u32 reg32
, phy9_orig
;
878 int retries
, do_phy_reset
, err
;
884 err
= tg3_bmcr_reset(tp
);
890 /* Disable transmitter and interrupt. */
891 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
895 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
897 /* Set full-duplex, 1000 mbps. */
898 tg3_writephy(tp
, MII_BMCR
,
899 BMCR_FULLDPLX
| TG3_BMCR_SPEED1000
);
901 /* Set to master mode. */
902 if (tg3_readphy(tp
, MII_TG3_CTRL
, &phy9_orig
))
905 tg3_writephy(tp
, MII_TG3_CTRL
,
906 (MII_TG3_CTRL_AS_MASTER
|
907 MII_TG3_CTRL_ENABLE_AS_MASTER
));
909 /* Enable SM_DSP_CLOCK and 6dB. */
910 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
912 /* Block the PHY control access. */
913 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
914 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0800);
916 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
921 err
= tg3_phy_reset_chanpat(tp
);
925 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
926 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0000);
928 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
929 tg3_writephy(tp
, 0x16, 0x0000);
931 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
932 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
933 /* Set Extended packet length bit for jumbo frames */
934 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4400);
937 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
940 tg3_writephy(tp
, MII_TG3_CTRL
, phy9_orig
);
942 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
944 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
951 static void tg3_link_report(struct tg3
*);
953 /* This will reset the tigon3 PHY if there is no valid
954 * link unless the FORCE argument is non-zero.
956 static int tg3_phy_reset(struct tg3
*tp
)
961 err
= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
962 err
|= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
966 if (netif_running(tp
->dev
) && netif_carrier_ok(tp
->dev
)) {
967 netif_carrier_off(tp
->dev
);
971 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
972 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
973 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
974 err
= tg3_phy_reset_5703_4_5(tp
);
980 err
= tg3_bmcr_reset(tp
);
985 if (tp
->tg3_flags2
& TG3_FLG2_PHY_ADC_BUG
) {
986 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
987 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
988 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x2aaa);
989 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
990 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0323);
991 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
993 if (tp
->tg3_flags2
& TG3_FLG2_PHY_5704_A0_BUG
) {
994 tg3_writephy(tp
, 0x1c, 0x8d68);
995 tg3_writephy(tp
, 0x1c, 0x8d68);
997 if (tp
->tg3_flags2
& TG3_FLG2_PHY_BER_BUG
) {
998 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
999 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
1000 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x310b);
1001 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
1002 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x9506);
1003 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x401f);
1004 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x14e2);
1005 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1007 else if (tp
->tg3_flags2
& TG3_FLG2_PHY_JITTER_BUG
) {
1008 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
1009 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
1010 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
1011 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
1013 /* Set Extended packet length bit (bit 14) on all chips that */
1014 /* support jumbo frames */
1015 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
1016 /* Cannot do read-modify-write on 5401 */
1017 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
1018 } else if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1021 /* Set bit 14 with read-modify-write to preserve other bits */
1022 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0007) &&
1023 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &phy_reg
))
1024 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy_reg
| 0x4000);
1027 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1028 * jumbo frames transmission.
1030 if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1033 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &phy_reg
))
1034 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1035 phy_reg
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
1038 tg3_phy_set_wirespeed(tp
);
1042 static void tg3_frob_aux_power(struct tg3
*tp
)
1044 struct tg3
*tp_peer
= tp
;
1046 if ((tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) != 0)
1049 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
1050 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
1051 struct net_device
*dev_peer
;
1053 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
1054 /* remove_one() may have been run on the peer. */
1058 tp_peer
= netdev_priv(dev_peer
);
1061 if ((tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1062 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0 ||
1063 (tp_peer
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1064 (tp_peer
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
1065 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1066 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1067 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1068 (GRC_LCLCTRL_GPIO_OE0
|
1069 GRC_LCLCTRL_GPIO_OE1
|
1070 GRC_LCLCTRL_GPIO_OE2
|
1071 GRC_LCLCTRL_GPIO_OUTPUT0
|
1072 GRC_LCLCTRL_GPIO_OUTPUT1
),
1076 u32 grc_local_ctrl
= 0;
1078 if (tp_peer
!= tp
&&
1079 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1082 /* Workaround to prevent overdrawing Amps. */
1083 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
1085 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
1086 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1087 grc_local_ctrl
, 100);
1090 /* On 5753 and variants, GPIO2 cannot be used. */
1091 no_gpio2
= tp
->nic_sram_data_cfg
&
1092 NIC_SRAM_DATA_CFG_NO_GPIO2
;
1094 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
1095 GRC_LCLCTRL_GPIO_OE1
|
1096 GRC_LCLCTRL_GPIO_OE2
|
1097 GRC_LCLCTRL_GPIO_OUTPUT1
|
1098 GRC_LCLCTRL_GPIO_OUTPUT2
;
1100 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
1101 GRC_LCLCTRL_GPIO_OUTPUT2
);
1103 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1104 grc_local_ctrl
, 100);
1106 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
1108 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1109 grc_local_ctrl
, 100);
1112 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
1113 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1114 grc_local_ctrl
, 100);
1118 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
1119 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
1120 if (tp_peer
!= tp
&&
1121 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1124 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1125 (GRC_LCLCTRL_GPIO_OE1
|
1126 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1128 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1129 GRC_LCLCTRL_GPIO_OE1
, 100);
1131 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1132 (GRC_LCLCTRL_GPIO_OE1
|
1133 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1138 static int tg3_setup_phy(struct tg3
*, int);
1140 #define RESET_KIND_SHUTDOWN 0
1141 #define RESET_KIND_INIT 1
1142 #define RESET_KIND_SUSPEND 2
1144 static void tg3_write_sig_post_reset(struct tg3
*, int);
1145 static int tg3_halt_cpu(struct tg3
*, u32
);
1146 static int tg3_nvram_lock(struct tg3
*);
1147 static void tg3_nvram_unlock(struct tg3
*);
1149 static void tg3_power_down_phy(struct tg3
*tp
)
1151 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
1154 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
1155 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x01b2);
1157 /* The PHY should not be powered down on some chips because
1160 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1161 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
1162 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
&&
1163 (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)))
1165 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
1168 static int tg3_set_power_state(struct tg3
*tp
, pci_power_t state
)
1171 u16 power_control
, power_caps
;
1172 int pm
= tp
->pm_cap
;
1174 /* Make sure register accesses (indirect or otherwise)
1175 * will function correctly.
1177 pci_write_config_dword(tp
->pdev
,
1178 TG3PCI_MISC_HOST_CTRL
,
1179 tp
->misc_host_ctrl
);
1181 pci_read_config_word(tp
->pdev
,
1184 power_control
|= PCI_PM_CTRL_PME_STATUS
;
1185 power_control
&= ~(PCI_PM_CTRL_STATE_MASK
);
1189 pci_write_config_word(tp
->pdev
,
1192 udelay(100); /* Delay after power state change */
1194 /* Switch out of Vaux if it is not a LOM */
1195 if (!(tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
1196 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
, 100);
1213 printk(KERN_WARNING PFX
"%s: Invalid power state (%d) "
1215 tp
->dev
->name
, state
);
1219 power_control
|= PCI_PM_CTRL_PME_ENABLE
;
1221 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
1222 tw32(TG3PCI_MISC_HOST_CTRL
,
1223 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
1225 if (tp
->link_config
.phy_is_low_power
== 0) {
1226 tp
->link_config
.phy_is_low_power
= 1;
1227 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
1228 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
1229 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
1232 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)) {
1233 tp
->link_config
.speed
= SPEED_10
;
1234 tp
->link_config
.duplex
= DUPLEX_HALF
;
1235 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
1236 tg3_setup_phy(tp
, 0);
1239 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
1242 val
= tr32(GRC_VCPU_EXT_CTRL
);
1243 tw32(GRC_VCPU_EXT_CTRL
, val
| GRC_VCPU_EXT_CTRL_DISABLE_WOL
);
1244 } else if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1248 for (i
= 0; i
< 200; i
++) {
1249 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
1250 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1255 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
1256 WOL_DRV_STATE_SHUTDOWN
|
1257 WOL_DRV_WOL
| WOL_SET_MAGIC_PKT
);
1259 pci_read_config_word(tp
->pdev
, pm
+ PCI_PM_PMC
, &power_caps
);
1261 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) {
1264 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
1265 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x5a);
1268 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
1269 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
1271 mac_mode
= MAC_MODE_PORT_MODE_MII
;
1273 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
||
1274 !(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
))
1275 mac_mode
|= MAC_MODE_LINK_POLARITY
;
1277 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
1280 if (!(tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
1281 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
1283 if (((power_caps
& PCI_PM_CAP_PME_D3cold
) &&
1284 (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)))
1285 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
1287 tw32_f(MAC_MODE
, mac_mode
);
1290 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
1294 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
) &&
1295 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1296 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
1299 base_val
= tp
->pci_clock_ctrl
;
1300 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
1301 CLOCK_CTRL_TXCLK_DISABLE
);
1303 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
1304 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
1305 } else if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
1307 } else if (!((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
1308 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))) {
1309 u32 newbits1
, newbits2
;
1311 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1312 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1313 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
1314 CLOCK_CTRL_TXCLK_DISABLE
|
1316 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1317 } else if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
1318 newbits1
= CLOCK_CTRL_625_CORE
;
1319 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
1321 newbits1
= CLOCK_CTRL_ALTCLK
;
1322 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1325 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
1328 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
1331 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
1334 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1335 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1336 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
1337 CLOCK_CTRL_TXCLK_DISABLE
|
1338 CLOCK_CTRL_44MHZ_CORE
);
1340 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
1343 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1344 tp
->pci_clock_ctrl
| newbits3
, 40);
1348 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) &&
1349 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
1350 tg3_power_down_phy(tp
);
1352 tg3_frob_aux_power(tp
);
1354 /* Workaround for unstable PLL clock */
1355 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
1356 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
1357 u32 val
= tr32(0x7d00);
1359 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1361 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1364 err
= tg3_nvram_lock(tp
);
1365 tg3_halt_cpu(tp
, RX_CPU_BASE
);
1367 tg3_nvram_unlock(tp
);
1371 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
1373 /* Finally, set the new power state. */
1374 pci_write_config_word(tp
->pdev
, pm
+ PCI_PM_CTRL
, power_control
);
1375 udelay(100); /* Delay after power state change */
1380 static void tg3_link_report(struct tg3
*tp
)
1382 if (!netif_carrier_ok(tp
->dev
)) {
1383 printk(KERN_INFO PFX
"%s: Link is down.\n", tp
->dev
->name
);
1385 printk(KERN_INFO PFX
"%s: Link is up at %d Mbps, %s duplex.\n",
1387 (tp
->link_config
.active_speed
== SPEED_1000
?
1389 (tp
->link_config
.active_speed
== SPEED_100
?
1391 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1394 printk(KERN_INFO PFX
"%s: Flow control is %s for TX and "
1397 (tp
->tg3_flags
& TG3_FLAG_TX_PAUSE
) ? "on" : "off",
1398 (tp
->tg3_flags
& TG3_FLAG_RX_PAUSE
) ? "on" : "off");
1402 static void tg3_setup_flow_control(struct tg3
*tp
, u32 local_adv
, u32 remote_adv
)
1404 u32 new_tg3_flags
= 0;
1405 u32 old_rx_mode
= tp
->rx_mode
;
1406 u32 old_tx_mode
= tp
->tx_mode
;
1408 if (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) {
1410 /* Convert 1000BaseX flow control bits to 1000BaseT
1411 * bits before resolving flow control.
1413 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
1414 local_adv
&= ~(ADVERTISE_PAUSE_CAP
|
1415 ADVERTISE_PAUSE_ASYM
);
1416 remote_adv
&= ~(LPA_PAUSE_CAP
| LPA_PAUSE_ASYM
);
1418 if (local_adv
& ADVERTISE_1000XPAUSE
)
1419 local_adv
|= ADVERTISE_PAUSE_CAP
;
1420 if (local_adv
& ADVERTISE_1000XPSE_ASYM
)
1421 local_adv
|= ADVERTISE_PAUSE_ASYM
;
1422 if (remote_adv
& LPA_1000XPAUSE
)
1423 remote_adv
|= LPA_PAUSE_CAP
;
1424 if (remote_adv
& LPA_1000XPAUSE_ASYM
)
1425 remote_adv
|= LPA_PAUSE_ASYM
;
1428 if (local_adv
& ADVERTISE_PAUSE_CAP
) {
1429 if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1430 if (remote_adv
& LPA_PAUSE_CAP
)
1432 (TG3_FLAG_RX_PAUSE
|
1434 else if (remote_adv
& LPA_PAUSE_ASYM
)
1436 (TG3_FLAG_RX_PAUSE
);
1438 if (remote_adv
& LPA_PAUSE_CAP
)
1440 (TG3_FLAG_RX_PAUSE
|
1443 } else if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1444 if ((remote_adv
& LPA_PAUSE_CAP
) &&
1445 (remote_adv
& LPA_PAUSE_ASYM
))
1446 new_tg3_flags
|= TG3_FLAG_TX_PAUSE
;
1449 tp
->tg3_flags
&= ~(TG3_FLAG_RX_PAUSE
| TG3_FLAG_TX_PAUSE
);
1450 tp
->tg3_flags
|= new_tg3_flags
;
1452 new_tg3_flags
= tp
->tg3_flags
;
1455 if (new_tg3_flags
& TG3_FLAG_RX_PAUSE
)
1456 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1458 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1460 if (old_rx_mode
!= tp
->rx_mode
) {
1461 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1464 if (new_tg3_flags
& TG3_FLAG_TX_PAUSE
)
1465 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1467 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1469 if (old_tx_mode
!= tp
->tx_mode
) {
1470 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1474 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
1476 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
1477 case MII_TG3_AUX_STAT_10HALF
:
1479 *duplex
= DUPLEX_HALF
;
1482 case MII_TG3_AUX_STAT_10FULL
:
1484 *duplex
= DUPLEX_FULL
;
1487 case MII_TG3_AUX_STAT_100HALF
:
1489 *duplex
= DUPLEX_HALF
;
1492 case MII_TG3_AUX_STAT_100FULL
:
1494 *duplex
= DUPLEX_FULL
;
1497 case MII_TG3_AUX_STAT_1000HALF
:
1498 *speed
= SPEED_1000
;
1499 *duplex
= DUPLEX_HALF
;
1502 case MII_TG3_AUX_STAT_1000FULL
:
1503 *speed
= SPEED_1000
;
1504 *duplex
= DUPLEX_FULL
;
1508 *speed
= SPEED_INVALID
;
1509 *duplex
= DUPLEX_INVALID
;
1514 static void tg3_phy_copper_begin(struct tg3
*tp
)
1519 if (tp
->link_config
.phy_is_low_power
) {
1520 /* Entering low power mode. Disable gigabit and
1521 * 100baseT advertisements.
1523 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1525 new_adv
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1526 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1527 if (tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
)
1528 new_adv
|= (ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1530 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1531 } else if (tp
->link_config
.speed
== SPEED_INVALID
) {
1532 tp
->link_config
.advertising
=
1533 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
1534 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
1535 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
1536 ADVERTISED_Autoneg
| ADVERTISED_MII
);
1538 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
1539 tp
->link_config
.advertising
&=
1540 ~(ADVERTISED_1000baseT_Half
|
1541 ADVERTISED_1000baseT_Full
);
1543 new_adv
= (ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1544 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Half
)
1545 new_adv
|= ADVERTISE_10HALF
;
1546 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Full
)
1547 new_adv
|= ADVERTISE_10FULL
;
1548 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Half
)
1549 new_adv
|= ADVERTISE_100HALF
;
1550 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Full
)
1551 new_adv
|= ADVERTISE_100FULL
;
1552 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1554 if (tp
->link_config
.advertising
&
1555 (ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
)) {
1557 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
1558 new_adv
|= MII_TG3_CTRL_ADV_1000_HALF
;
1559 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
1560 new_adv
|= MII_TG3_CTRL_ADV_1000_FULL
;
1561 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) &&
1562 (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1563 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
))
1564 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1565 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1566 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1568 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1571 /* Asking for a specific link mode. */
1572 if (tp
->link_config
.speed
== SPEED_1000
) {
1573 new_adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1574 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1576 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1577 new_adv
= MII_TG3_CTRL_ADV_1000_FULL
;
1579 new_adv
= MII_TG3_CTRL_ADV_1000_HALF
;
1580 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1581 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
1582 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1583 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1584 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1586 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1588 new_adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1589 if (tp
->link_config
.speed
== SPEED_100
) {
1590 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1591 new_adv
|= ADVERTISE_100FULL
;
1593 new_adv
|= ADVERTISE_100HALF
;
1595 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1596 new_adv
|= ADVERTISE_10FULL
;
1598 new_adv
|= ADVERTISE_10HALF
;
1600 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1604 if (tp
->link_config
.autoneg
== AUTONEG_DISABLE
&&
1605 tp
->link_config
.speed
!= SPEED_INVALID
) {
1606 u32 bmcr
, orig_bmcr
;
1608 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
1609 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
1612 switch (tp
->link_config
.speed
) {
1618 bmcr
|= BMCR_SPEED100
;
1622 bmcr
|= TG3_BMCR_SPEED1000
;
1626 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1627 bmcr
|= BMCR_FULLDPLX
;
1629 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
1630 (bmcr
!= orig_bmcr
)) {
1631 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
1632 for (i
= 0; i
< 1500; i
++) {
1636 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
1637 tg3_readphy(tp
, MII_BMSR
, &tmp
))
1639 if (!(tmp
& BMSR_LSTATUS
)) {
1644 tg3_writephy(tp
, MII_BMCR
, bmcr
);
1648 tg3_writephy(tp
, MII_BMCR
,
1649 BMCR_ANENABLE
| BMCR_ANRESTART
);
1653 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
1657 /* Turn off tap power management. */
1658 /* Set Extended packet length bit */
1659 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
1661 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0012);
1662 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1804);
1664 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0013);
1665 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1204);
1667 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
1668 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0132);
1670 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
1671 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0232);
1673 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
1674 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0a20);
1681 static int tg3_copper_is_advertising_all(struct tg3
*tp
)
1683 u32 adv_reg
, all_mask
;
1685 if (tg3_readphy(tp
, MII_ADVERTISE
, &adv_reg
))
1688 all_mask
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1689 ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1690 if ((adv_reg
& all_mask
) != all_mask
)
1692 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
1695 if (tg3_readphy(tp
, MII_TG3_CTRL
, &tg3_ctrl
))
1698 all_mask
= (MII_TG3_CTRL_ADV_1000_HALF
|
1699 MII_TG3_CTRL_ADV_1000_FULL
);
1700 if ((tg3_ctrl
& all_mask
) != all_mask
)
1706 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
1708 int current_link_up
;
1717 (MAC_STATUS_SYNC_CHANGED
|
1718 MAC_STATUS_CFG_CHANGED
|
1719 MAC_STATUS_MI_COMPLETION
|
1720 MAC_STATUS_LNKSTATE_CHANGED
));
1723 tp
->mi_mode
= MAC_MI_MODE_BASE
;
1724 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1727 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x02);
1729 /* Some third-party PHYs need to be reset on link going
1732 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
1733 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
1734 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
1735 netif_carrier_ok(tp
->dev
)) {
1736 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1737 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1738 !(bmsr
& BMSR_LSTATUS
))
1744 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
1745 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1746 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
1747 !(tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
))
1750 if (!(bmsr
& BMSR_LSTATUS
)) {
1751 err
= tg3_init_5401phy_dsp(tp
);
1755 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1756 for (i
= 0; i
< 1000; i
++) {
1758 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1759 (bmsr
& BMSR_LSTATUS
)) {
1765 if ((tp
->phy_id
& PHY_ID_REV_MASK
) == PHY_REV_BCM5401_B0
&&
1766 !(bmsr
& BMSR_LSTATUS
) &&
1767 tp
->link_config
.active_speed
== SPEED_1000
) {
1768 err
= tg3_phy_reset(tp
);
1770 err
= tg3_init_5401phy_dsp(tp
);
1775 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1776 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
1777 /* 5701 {A0,B0} CRC bug workaround */
1778 tg3_writephy(tp
, 0x15, 0x0a75);
1779 tg3_writephy(tp
, 0x1c, 0x8c68);
1780 tg3_writephy(tp
, 0x1c, 0x8d68);
1781 tg3_writephy(tp
, 0x1c, 0x8c68);
1784 /* Clear pending interrupts... */
1785 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
1786 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
1788 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
)
1789 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
1791 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
1793 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1794 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1795 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
1796 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1797 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
1799 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
1802 current_link_up
= 0;
1803 current_speed
= SPEED_INVALID
;
1804 current_duplex
= DUPLEX_INVALID
;
1806 if (tp
->tg3_flags2
& TG3_FLG2_CAPACITIVE_COUPLING
) {
1809 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4007);
1810 tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
);
1811 if (!(val
& (1 << 10))) {
1813 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
1819 for (i
= 0; i
< 100; i
++) {
1820 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1821 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1822 (bmsr
& BMSR_LSTATUS
))
1827 if (bmsr
& BMSR_LSTATUS
) {
1830 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
1831 for (i
= 0; i
< 2000; i
++) {
1833 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
1838 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
1843 for (i
= 0; i
< 200; i
++) {
1844 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
1845 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
1847 if (bmcr
&& bmcr
!= 0x7fff)
1852 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
1853 if (bmcr
& BMCR_ANENABLE
) {
1854 current_link_up
= 1;
1856 /* Force autoneg restart if we are exiting
1859 if (!tg3_copper_is_advertising_all(tp
))
1860 current_link_up
= 0;
1862 current_link_up
= 0;
1865 if (!(bmcr
& BMCR_ANENABLE
) &&
1866 tp
->link_config
.speed
== current_speed
&&
1867 tp
->link_config
.duplex
== current_duplex
) {
1868 current_link_up
= 1;
1870 current_link_up
= 0;
1874 tp
->link_config
.active_speed
= current_speed
;
1875 tp
->link_config
.active_duplex
= current_duplex
;
1878 if (current_link_up
== 1 &&
1879 (tp
->link_config
.active_duplex
== DUPLEX_FULL
) &&
1880 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
1881 u32 local_adv
, remote_adv
;
1883 if (tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
))
1885 local_adv
&= (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
);
1887 if (tg3_readphy(tp
, MII_LPA
, &remote_adv
))
1890 remote_adv
&= (LPA_PAUSE_CAP
| LPA_PAUSE_ASYM
);
1892 /* If we are not advertising full pause capability,
1893 * something is wrong. Bring the link down and reconfigure.
1895 if (local_adv
!= ADVERTISE_PAUSE_CAP
) {
1896 current_link_up
= 0;
1898 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
1902 if (current_link_up
== 0 || tp
->link_config
.phy_is_low_power
) {
1905 tg3_phy_copper_begin(tp
);
1907 tg3_readphy(tp
, MII_BMSR
, &tmp
);
1908 if (!tg3_readphy(tp
, MII_BMSR
, &tmp
) &&
1909 (tmp
& BMSR_LSTATUS
))
1910 current_link_up
= 1;
1913 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
1914 if (current_link_up
== 1) {
1915 if (tp
->link_config
.active_speed
== SPEED_100
||
1916 tp
->link_config
.active_speed
== SPEED_10
)
1917 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1919 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1921 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1923 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
1924 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
1925 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1927 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
1928 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
1929 if ((tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
) ||
1930 (current_link_up
== 1 &&
1931 tp
->link_config
.active_speed
== SPEED_10
))
1932 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
1934 if (current_link_up
== 1)
1935 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
1938 /* ??? Without this setting Netgear GA302T PHY does not
1939 * ??? send/receive packets...
1941 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5411
&&
1942 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
1943 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
1944 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1948 tw32_f(MAC_MODE
, tp
->mac_mode
);
1951 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
1952 /* Polled via timer. */
1953 tw32_f(MAC_EVENT
, 0);
1955 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
1959 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
1960 current_link_up
== 1 &&
1961 tp
->link_config
.active_speed
== SPEED_1000
&&
1962 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) ||
1963 (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
))) {
1966 (MAC_STATUS_SYNC_CHANGED
|
1967 MAC_STATUS_CFG_CHANGED
));
1970 NIC_SRAM_FIRMWARE_MBOX
,
1971 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
1974 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
1975 if (current_link_up
)
1976 netif_carrier_on(tp
->dev
);
1978 netif_carrier_off(tp
->dev
);
1979 tg3_link_report(tp
);
1985 struct tg3_fiber_aneginfo
{
1987 #define ANEG_STATE_UNKNOWN 0
1988 #define ANEG_STATE_AN_ENABLE 1
1989 #define ANEG_STATE_RESTART_INIT 2
1990 #define ANEG_STATE_RESTART 3
1991 #define ANEG_STATE_DISABLE_LINK_OK 4
1992 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1993 #define ANEG_STATE_ABILITY_DETECT 6
1994 #define ANEG_STATE_ACK_DETECT_INIT 7
1995 #define ANEG_STATE_ACK_DETECT 8
1996 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1997 #define ANEG_STATE_COMPLETE_ACK 10
1998 #define ANEG_STATE_IDLE_DETECT_INIT 11
1999 #define ANEG_STATE_IDLE_DETECT 12
2000 #define ANEG_STATE_LINK_OK 13
2001 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2002 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2005 #define MR_AN_ENABLE 0x00000001
2006 #define MR_RESTART_AN 0x00000002
2007 #define MR_AN_COMPLETE 0x00000004
2008 #define MR_PAGE_RX 0x00000008
2009 #define MR_NP_LOADED 0x00000010
2010 #define MR_TOGGLE_TX 0x00000020
2011 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2012 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2013 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2014 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2015 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2016 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2017 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2018 #define MR_TOGGLE_RX 0x00002000
2019 #define MR_NP_RX 0x00004000
2021 #define MR_LINK_OK 0x80000000
2023 unsigned long link_time
, cur_time
;
2025 u32 ability_match_cfg
;
2026 int ability_match_count
;
2028 char ability_match
, idle_match
, ack_match
;
2030 u32 txconfig
, rxconfig
;
2031 #define ANEG_CFG_NP 0x00000080
2032 #define ANEG_CFG_ACK 0x00000040
2033 #define ANEG_CFG_RF2 0x00000020
2034 #define ANEG_CFG_RF1 0x00000010
2035 #define ANEG_CFG_PS2 0x00000001
2036 #define ANEG_CFG_PS1 0x00008000
2037 #define ANEG_CFG_HD 0x00004000
2038 #define ANEG_CFG_FD 0x00002000
2039 #define ANEG_CFG_INVAL 0x00001f06
2044 #define ANEG_TIMER_ENAB 2
2045 #define ANEG_FAILED -1
2047 #define ANEG_STATE_SETTLE_TIME 10000
2049 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
2050 struct tg3_fiber_aneginfo
*ap
)
2052 unsigned long delta
;
2056 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
2060 ap
->ability_match_cfg
= 0;
2061 ap
->ability_match_count
= 0;
2062 ap
->ability_match
= 0;
2068 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
2069 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
2071 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
2072 ap
->ability_match_cfg
= rx_cfg_reg
;
2073 ap
->ability_match
= 0;
2074 ap
->ability_match_count
= 0;
2076 if (++ap
->ability_match_count
> 1) {
2077 ap
->ability_match
= 1;
2078 ap
->ability_match_cfg
= rx_cfg_reg
;
2081 if (rx_cfg_reg
& ANEG_CFG_ACK
)
2089 ap
->ability_match_cfg
= 0;
2090 ap
->ability_match_count
= 0;
2091 ap
->ability_match
= 0;
2097 ap
->rxconfig
= rx_cfg_reg
;
2101 case ANEG_STATE_UNKNOWN
:
2102 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
2103 ap
->state
= ANEG_STATE_AN_ENABLE
;
2106 case ANEG_STATE_AN_ENABLE
:
2107 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
2108 if (ap
->flags
& MR_AN_ENABLE
) {
2111 ap
->ability_match_cfg
= 0;
2112 ap
->ability_match_count
= 0;
2113 ap
->ability_match
= 0;
2117 ap
->state
= ANEG_STATE_RESTART_INIT
;
2119 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
2123 case ANEG_STATE_RESTART_INIT
:
2124 ap
->link_time
= ap
->cur_time
;
2125 ap
->flags
&= ~(MR_NP_LOADED
);
2127 tw32(MAC_TX_AUTO_NEG
, 0);
2128 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2129 tw32_f(MAC_MODE
, tp
->mac_mode
);
2132 ret
= ANEG_TIMER_ENAB
;
2133 ap
->state
= ANEG_STATE_RESTART
;
2136 case ANEG_STATE_RESTART
:
2137 delta
= ap
->cur_time
- ap
->link_time
;
2138 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2139 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
2141 ret
= ANEG_TIMER_ENAB
;
2145 case ANEG_STATE_DISABLE_LINK_OK
:
2149 case ANEG_STATE_ABILITY_DETECT_INIT
:
2150 ap
->flags
&= ~(MR_TOGGLE_TX
);
2151 ap
->txconfig
= (ANEG_CFG_FD
| ANEG_CFG_PS1
);
2152 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2153 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2154 tw32_f(MAC_MODE
, tp
->mac_mode
);
2157 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
2160 case ANEG_STATE_ABILITY_DETECT
:
2161 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0) {
2162 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
2166 case ANEG_STATE_ACK_DETECT_INIT
:
2167 ap
->txconfig
|= ANEG_CFG_ACK
;
2168 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2169 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2170 tw32_f(MAC_MODE
, tp
->mac_mode
);
2173 ap
->state
= ANEG_STATE_ACK_DETECT
;
2176 case ANEG_STATE_ACK_DETECT
:
2177 if (ap
->ack_match
!= 0) {
2178 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
2179 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
2180 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
2182 ap
->state
= ANEG_STATE_AN_ENABLE
;
2184 } else if (ap
->ability_match
!= 0 &&
2185 ap
->rxconfig
== 0) {
2186 ap
->state
= ANEG_STATE_AN_ENABLE
;
2190 case ANEG_STATE_COMPLETE_ACK_INIT
:
2191 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
2195 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
2196 MR_LP_ADV_HALF_DUPLEX
|
2197 MR_LP_ADV_SYM_PAUSE
|
2198 MR_LP_ADV_ASYM_PAUSE
|
2199 MR_LP_ADV_REMOTE_FAULT1
|
2200 MR_LP_ADV_REMOTE_FAULT2
|
2201 MR_LP_ADV_NEXT_PAGE
|
2204 if (ap
->rxconfig
& ANEG_CFG_FD
)
2205 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
2206 if (ap
->rxconfig
& ANEG_CFG_HD
)
2207 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
2208 if (ap
->rxconfig
& ANEG_CFG_PS1
)
2209 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
2210 if (ap
->rxconfig
& ANEG_CFG_PS2
)
2211 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
2212 if (ap
->rxconfig
& ANEG_CFG_RF1
)
2213 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
2214 if (ap
->rxconfig
& ANEG_CFG_RF2
)
2215 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
2216 if (ap
->rxconfig
& ANEG_CFG_NP
)
2217 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
2219 ap
->link_time
= ap
->cur_time
;
2221 ap
->flags
^= (MR_TOGGLE_TX
);
2222 if (ap
->rxconfig
& 0x0008)
2223 ap
->flags
|= MR_TOGGLE_RX
;
2224 if (ap
->rxconfig
& ANEG_CFG_NP
)
2225 ap
->flags
|= MR_NP_RX
;
2226 ap
->flags
|= MR_PAGE_RX
;
2228 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
2229 ret
= ANEG_TIMER_ENAB
;
2232 case ANEG_STATE_COMPLETE_ACK
:
2233 if (ap
->ability_match
!= 0 &&
2234 ap
->rxconfig
== 0) {
2235 ap
->state
= ANEG_STATE_AN_ENABLE
;
2238 delta
= ap
->cur_time
- ap
->link_time
;
2239 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2240 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
2241 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2243 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
2244 !(ap
->flags
& MR_NP_RX
)) {
2245 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2253 case ANEG_STATE_IDLE_DETECT_INIT
:
2254 ap
->link_time
= ap
->cur_time
;
2255 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2256 tw32_f(MAC_MODE
, tp
->mac_mode
);
2259 ap
->state
= ANEG_STATE_IDLE_DETECT
;
2260 ret
= ANEG_TIMER_ENAB
;
2263 case ANEG_STATE_IDLE_DETECT
:
2264 if (ap
->ability_match
!= 0 &&
2265 ap
->rxconfig
== 0) {
2266 ap
->state
= ANEG_STATE_AN_ENABLE
;
2269 delta
= ap
->cur_time
- ap
->link_time
;
2270 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2271 /* XXX another gem from the Broadcom driver :( */
2272 ap
->state
= ANEG_STATE_LINK_OK
;
2276 case ANEG_STATE_LINK_OK
:
2277 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
2281 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
2282 /* ??? unimplemented */
2285 case ANEG_STATE_NEXT_PAGE_WAIT
:
2286 /* ??? unimplemented */
2297 static int fiber_autoneg(struct tg3
*tp
, u32
*flags
)
2300 struct tg3_fiber_aneginfo aninfo
;
2301 int status
= ANEG_FAILED
;
2305 tw32_f(MAC_TX_AUTO_NEG
, 0);
2307 tmp
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
2308 tw32_f(MAC_MODE
, tmp
| MAC_MODE_PORT_MODE_GMII
);
2311 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
);
2314 memset(&aninfo
, 0, sizeof(aninfo
));
2315 aninfo
.flags
|= MR_AN_ENABLE
;
2316 aninfo
.state
= ANEG_STATE_UNKNOWN
;
2317 aninfo
.cur_time
= 0;
2319 while (++tick
< 195000) {
2320 status
= tg3_fiber_aneg_smachine(tp
, &aninfo
);
2321 if (status
== ANEG_DONE
|| status
== ANEG_FAILED
)
2327 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2328 tw32_f(MAC_MODE
, tp
->mac_mode
);
2331 *flags
= aninfo
.flags
;
2333 if (status
== ANEG_DONE
&&
2334 (aninfo
.flags
& (MR_AN_COMPLETE
| MR_LINK_OK
|
2335 MR_LP_ADV_FULL_DUPLEX
)))
2341 static void tg3_init_bcm8002(struct tg3
*tp
)
2343 u32 mac_status
= tr32(MAC_STATUS
);
2346 /* Reset when initting first time or we have a link. */
2347 if ((tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) &&
2348 !(mac_status
& MAC_STATUS_PCS_SYNCED
))
2351 /* Set PLL lock range. */
2352 tg3_writephy(tp
, 0x16, 0x8007);
2355 tg3_writephy(tp
, MII_BMCR
, BMCR_RESET
);
2357 /* Wait for reset to complete. */
2358 /* XXX schedule_timeout() ... */
2359 for (i
= 0; i
< 500; i
++)
2362 /* Config mode; select PMA/Ch 1 regs. */
2363 tg3_writephy(tp
, 0x10, 0x8411);
2365 /* Enable auto-lock and comdet, select txclk for tx. */
2366 tg3_writephy(tp
, 0x11, 0x0a10);
2368 tg3_writephy(tp
, 0x18, 0x00a0);
2369 tg3_writephy(tp
, 0x16, 0x41ff);
2371 /* Assert and deassert POR. */
2372 tg3_writephy(tp
, 0x13, 0x0400);
2374 tg3_writephy(tp
, 0x13, 0x0000);
2376 tg3_writephy(tp
, 0x11, 0x0a50);
2378 tg3_writephy(tp
, 0x11, 0x0a10);
2380 /* Wait for signal to stabilize */
2381 /* XXX schedule_timeout() ... */
2382 for (i
= 0; i
< 15000; i
++)
2385 /* Deselect the channel register so we can read the PHYID
2388 tg3_writephy(tp
, 0x10, 0x8011);
2391 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
2393 u32 sg_dig_ctrl
, sg_dig_status
;
2394 u32 serdes_cfg
, expected_sg_dig_ctrl
;
2395 int workaround
, port_a
;
2396 int current_link_up
;
2399 expected_sg_dig_ctrl
= 0;
2402 current_link_up
= 0;
2404 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
2405 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
2407 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
2410 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2411 /* preserve bits 20-23 for voltage regulator */
2412 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
2415 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2417 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
2418 if (sg_dig_ctrl
& (1 << 31)) {
2420 u32 val
= serdes_cfg
;
2426 tw32_f(MAC_SERDES_CFG
, val
);
2428 tw32_f(SG_DIG_CTRL
, 0x01388400);
2430 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
2431 tg3_setup_flow_control(tp
, 0, 0);
2432 current_link_up
= 1;
2437 /* Want auto-negotiation. */
2438 expected_sg_dig_ctrl
= 0x81388400;
2440 /* Pause capability */
2441 expected_sg_dig_ctrl
|= (1 << 11);
2443 /* Asymettric pause */
2444 expected_sg_dig_ctrl
|= (1 << 12);
2446 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
2447 if ((tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
) &&
2448 tp
->serdes_counter
&&
2449 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
2450 MAC_STATUS_RCVD_CFG
)) ==
2451 MAC_STATUS_PCS_SYNCED
)) {
2452 tp
->serdes_counter
--;
2453 current_link_up
= 1;
2458 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
2459 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| (1 << 30));
2461 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
2463 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
2464 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2465 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
2466 MAC_STATUS_SIGNAL_DET
)) {
2467 sg_dig_status
= tr32(SG_DIG_STATUS
);
2468 mac_status
= tr32(MAC_STATUS
);
2470 if ((sg_dig_status
& (1 << 1)) &&
2471 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2472 u32 local_adv
, remote_adv
;
2474 local_adv
= ADVERTISE_PAUSE_CAP
;
2476 if (sg_dig_status
& (1 << 19))
2477 remote_adv
|= LPA_PAUSE_CAP
;
2478 if (sg_dig_status
& (1 << 20))
2479 remote_adv
|= LPA_PAUSE_ASYM
;
2481 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2482 current_link_up
= 1;
2483 tp
->serdes_counter
= 0;
2484 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2485 } else if (!(sg_dig_status
& (1 << 1))) {
2486 if (tp
->serdes_counter
)
2487 tp
->serdes_counter
--;
2490 u32 val
= serdes_cfg
;
2497 tw32_f(MAC_SERDES_CFG
, val
);
2500 tw32_f(SG_DIG_CTRL
, 0x01388400);
2503 /* Link parallel detection - link is up */
2504 /* only if we have PCS_SYNC and not */
2505 /* receiving config code words */
2506 mac_status
= tr32(MAC_STATUS
);
2507 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2508 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
2509 tg3_setup_flow_control(tp
, 0, 0);
2510 current_link_up
= 1;
2512 TG3_FLG2_PARALLEL_DETECT
;
2513 tp
->serdes_counter
=
2514 SERDES_PARALLEL_DET_TIMEOUT
;
2516 goto restart_autoneg
;
2520 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
2521 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2525 return current_link_up
;
2528 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
2530 int current_link_up
= 0;
2532 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2533 tp
->tg3_flags
&= ~TG3_FLAG_GOT_SERDES_FLOWCTL
;
2537 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2541 if (fiber_autoneg(tp
, &flags
)) {
2542 u32 local_adv
, remote_adv
;
2544 local_adv
= ADVERTISE_PAUSE_CAP
;
2546 if (flags
& MR_LP_ADV_SYM_PAUSE
)
2547 remote_adv
|= LPA_PAUSE_CAP
;
2548 if (flags
& MR_LP_ADV_ASYM_PAUSE
)
2549 remote_adv
|= LPA_PAUSE_ASYM
;
2551 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2553 tp
->tg3_flags
|= TG3_FLAG_GOT_SERDES_FLOWCTL
;
2554 current_link_up
= 1;
2556 for (i
= 0; i
< 30; i
++) {
2559 (MAC_STATUS_SYNC_CHANGED
|
2560 MAC_STATUS_CFG_CHANGED
));
2562 if ((tr32(MAC_STATUS
) &
2563 (MAC_STATUS_SYNC_CHANGED
|
2564 MAC_STATUS_CFG_CHANGED
)) == 0)
2568 mac_status
= tr32(MAC_STATUS
);
2569 if (current_link_up
== 0 &&
2570 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2571 !(mac_status
& MAC_STATUS_RCVD_CFG
))
2572 current_link_up
= 1;
2574 /* Forcing 1000FD link up. */
2575 current_link_up
= 1;
2576 tp
->tg3_flags
|= TG3_FLAG_GOT_SERDES_FLOWCTL
;
2578 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
2583 return current_link_up
;
2586 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
2589 u16 orig_active_speed
;
2590 u8 orig_active_duplex
;
2592 int current_link_up
;
2596 (tp
->tg3_flags
& (TG3_FLAG_RX_PAUSE
|
2597 TG3_FLAG_TX_PAUSE
));
2598 orig_active_speed
= tp
->link_config
.active_speed
;
2599 orig_active_duplex
= tp
->link_config
.active_duplex
;
2601 if (!(tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
) &&
2602 netif_carrier_ok(tp
->dev
) &&
2603 (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
)) {
2604 mac_status
= tr32(MAC_STATUS
);
2605 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
2606 MAC_STATUS_SIGNAL_DET
|
2607 MAC_STATUS_CFG_CHANGED
|
2608 MAC_STATUS_RCVD_CFG
);
2609 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
2610 MAC_STATUS_SIGNAL_DET
)) {
2611 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
2612 MAC_STATUS_CFG_CHANGED
));
2617 tw32_f(MAC_TX_AUTO_NEG
, 0);
2619 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
2620 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
2621 tw32_f(MAC_MODE
, tp
->mac_mode
);
2624 if (tp
->phy_id
== PHY_ID_BCM8002
)
2625 tg3_init_bcm8002(tp
);
2627 /* Enable link change event even when serdes polling. */
2628 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2631 current_link_up
= 0;
2632 mac_status
= tr32(MAC_STATUS
);
2634 if (tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
)
2635 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
2637 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
2639 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
2640 tw32_f(MAC_MODE
, tp
->mac_mode
);
2643 tp
->hw_status
->status
=
2644 (SD_STATUS_UPDATED
|
2645 (tp
->hw_status
->status
& ~SD_STATUS_LINK_CHG
));
2647 for (i
= 0; i
< 100; i
++) {
2648 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
2649 MAC_STATUS_CFG_CHANGED
));
2651 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
2652 MAC_STATUS_CFG_CHANGED
|
2653 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
2657 mac_status
= tr32(MAC_STATUS
);
2658 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
2659 current_link_up
= 0;
2660 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
2661 tp
->serdes_counter
== 0) {
2662 tw32_f(MAC_MODE
, (tp
->mac_mode
|
2663 MAC_MODE_SEND_CONFIGS
));
2665 tw32_f(MAC_MODE
, tp
->mac_mode
);
2669 if (current_link_up
== 1) {
2670 tp
->link_config
.active_speed
= SPEED_1000
;
2671 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
2672 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
2673 LED_CTRL_LNKLED_OVERRIDE
|
2674 LED_CTRL_1000MBPS_ON
));
2676 tp
->link_config
.active_speed
= SPEED_INVALID
;
2677 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
2678 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
2679 LED_CTRL_LNKLED_OVERRIDE
|
2680 LED_CTRL_TRAFFIC_OVERRIDE
));
2683 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2684 if (current_link_up
)
2685 netif_carrier_on(tp
->dev
);
2687 netif_carrier_off(tp
->dev
);
2688 tg3_link_report(tp
);
2691 tp
->tg3_flags
& (TG3_FLAG_RX_PAUSE
|
2693 if (orig_pause_cfg
!= now_pause_cfg
||
2694 orig_active_speed
!= tp
->link_config
.active_speed
||
2695 orig_active_duplex
!= tp
->link_config
.active_duplex
)
2696 tg3_link_report(tp
);
2702 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
2704 int current_link_up
, err
= 0;
2709 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2710 tw32_f(MAC_MODE
, tp
->mac_mode
);
2716 (MAC_STATUS_SYNC_CHANGED
|
2717 MAC_STATUS_CFG_CHANGED
|
2718 MAC_STATUS_MI_COMPLETION
|
2719 MAC_STATUS_LNKSTATE_CHANGED
));
2725 current_link_up
= 0;
2726 current_speed
= SPEED_INVALID
;
2727 current_duplex
= DUPLEX_INVALID
;
2729 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2730 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2731 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
2732 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
2733 bmsr
|= BMSR_LSTATUS
;
2735 bmsr
&= ~BMSR_LSTATUS
;
2738 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2740 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
2741 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
2742 /* do nothing, just check for link up at the end */
2743 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2746 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
2747 new_adv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
2748 ADVERTISE_1000XPAUSE
|
2749 ADVERTISE_1000XPSE_ASYM
|
2752 /* Always advertise symmetric PAUSE just like copper */
2753 new_adv
|= ADVERTISE_1000XPAUSE
;
2755 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
2756 new_adv
|= ADVERTISE_1000XHALF
;
2757 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
2758 new_adv
|= ADVERTISE_1000XFULL
;
2760 if ((new_adv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
2761 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2762 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
2763 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2765 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2766 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
2767 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2774 bmcr
&= ~BMCR_SPEED1000
;
2775 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
2777 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
2778 new_bmcr
|= BMCR_FULLDPLX
;
2780 if (new_bmcr
!= bmcr
) {
2781 /* BMCR_SPEED1000 is a reserved bit that needs
2782 * to be set on write.
2784 new_bmcr
|= BMCR_SPEED1000
;
2786 /* Force a linkdown */
2787 if (netif_carrier_ok(tp
->dev
)) {
2790 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
2791 adv
&= ~(ADVERTISE_1000XFULL
|
2792 ADVERTISE_1000XHALF
|
2794 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
2795 tg3_writephy(tp
, MII_BMCR
, bmcr
|
2799 netif_carrier_off(tp
->dev
);
2801 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
2803 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2804 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2805 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2807 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
2808 bmsr
|= BMSR_LSTATUS
;
2810 bmsr
&= ~BMSR_LSTATUS
;
2812 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2816 if (bmsr
& BMSR_LSTATUS
) {
2817 current_speed
= SPEED_1000
;
2818 current_link_up
= 1;
2819 if (bmcr
& BMCR_FULLDPLX
)
2820 current_duplex
= DUPLEX_FULL
;
2822 current_duplex
= DUPLEX_HALF
;
2824 if (bmcr
& BMCR_ANENABLE
) {
2825 u32 local_adv
, remote_adv
, common
;
2827 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
2828 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
2829 common
= local_adv
& remote_adv
;
2830 if (common
& (ADVERTISE_1000XHALF
|
2831 ADVERTISE_1000XFULL
)) {
2832 if (common
& ADVERTISE_1000XFULL
)
2833 current_duplex
= DUPLEX_FULL
;
2835 current_duplex
= DUPLEX_HALF
;
2837 tg3_setup_flow_control(tp
, local_adv
,
2841 current_link_up
= 0;
2845 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
2846 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2847 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
2849 tw32_f(MAC_MODE
, tp
->mac_mode
);
2852 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2854 tp
->link_config
.active_speed
= current_speed
;
2855 tp
->link_config
.active_duplex
= current_duplex
;
2857 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2858 if (current_link_up
)
2859 netif_carrier_on(tp
->dev
);
2861 netif_carrier_off(tp
->dev
);
2862 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2864 tg3_link_report(tp
);
2869 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
2871 if (tp
->serdes_counter
) {
2872 /* Give autoneg time to complete. */
2873 tp
->serdes_counter
--;
2876 if (!netif_carrier_ok(tp
->dev
) &&
2877 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
2880 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2881 if (bmcr
& BMCR_ANENABLE
) {
2884 /* Select shadow register 0x1f */
2885 tg3_writephy(tp
, 0x1c, 0x7c00);
2886 tg3_readphy(tp
, 0x1c, &phy1
);
2888 /* Select expansion interrupt status register */
2889 tg3_writephy(tp
, 0x17, 0x0f01);
2890 tg3_readphy(tp
, 0x15, &phy2
);
2891 tg3_readphy(tp
, 0x15, &phy2
);
2893 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
2894 /* We have signal detect and not receiving
2895 * config code words, link is up by parallel
2899 bmcr
&= ~BMCR_ANENABLE
;
2900 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
2901 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2902 tp
->tg3_flags2
|= TG3_FLG2_PARALLEL_DETECT
;
2906 else if (netif_carrier_ok(tp
->dev
) &&
2907 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
2908 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
2911 /* Select expansion interrupt status register */
2912 tg3_writephy(tp
, 0x17, 0x0f01);
2913 tg3_readphy(tp
, 0x15, &phy2
);
2917 /* Config code words received, turn on autoneg. */
2918 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2919 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
2921 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2927 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
2931 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
2932 err
= tg3_setup_fiber_phy(tp
, force_reset
);
2933 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
2934 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
2936 err
= tg3_setup_copper_phy(tp
, force_reset
);
2939 if (tp
->link_config
.active_speed
== SPEED_1000
&&
2940 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2941 tw32(MAC_TX_LENGTHS
,
2942 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2943 (6 << TX_LENGTHS_IPG_SHIFT
) |
2944 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2946 tw32(MAC_TX_LENGTHS
,
2947 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2948 (6 << TX_LENGTHS_IPG_SHIFT
) |
2949 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2951 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
2952 if (netif_carrier_ok(tp
->dev
)) {
2953 tw32(HOSTCC_STAT_COAL_TICKS
,
2954 tp
->coal
.stats_block_coalesce_usecs
);
2956 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
2963 /* This is called whenever we suspect that the system chipset is re-
2964 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2965 * is bogus tx completions. We try to recover by setting the
2966 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2969 static void tg3_tx_recover(struct tg3
*tp
)
2971 BUG_ON((tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
) ||
2972 tp
->write32_tx_mbox
== tg3_write_indirect_mbox
);
2974 printk(KERN_WARNING PFX
"%s: The system may be re-ordering memory-"
2975 "mapped I/O cycles to the network device, attempting to "
2976 "recover. Please report the problem to the driver maintainer "
2977 "and include system chipset information.\n", tp
->dev
->name
);
2979 spin_lock(&tp
->lock
);
2980 tp
->tg3_flags
|= TG3_FLAG_TX_RECOVERY_PENDING
;
2981 spin_unlock(&tp
->lock
);
2984 static inline u32
tg3_tx_avail(struct tg3
*tp
)
2987 return (tp
->tx_pending
-
2988 ((tp
->tx_prod
- tp
->tx_cons
) & (TG3_TX_RING_SIZE
- 1)));
2991 /* Tigon3 never reports partial packet sends. So we do not
2992 * need special logic to handle SKBs that have not had all
2993 * of their frags sent yet, like SunGEM does.
2995 static void tg3_tx(struct tg3
*tp
)
2997 u32 hw_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
2998 u32 sw_idx
= tp
->tx_cons
;
3000 while (sw_idx
!= hw_idx
) {
3001 struct tx_ring_info
*ri
= &tp
->tx_buffers
[sw_idx
];
3002 struct sk_buff
*skb
= ri
->skb
;
3005 if (unlikely(skb
== NULL
)) {
3010 pci_unmap_single(tp
->pdev
,
3011 pci_unmap_addr(ri
, mapping
),
3017 sw_idx
= NEXT_TX(sw_idx
);
3019 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
3020 ri
= &tp
->tx_buffers
[sw_idx
];
3021 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
3024 pci_unmap_page(tp
->pdev
,
3025 pci_unmap_addr(ri
, mapping
),
3026 skb_shinfo(skb
)->frags
[i
].size
,
3029 sw_idx
= NEXT_TX(sw_idx
);
3034 if (unlikely(tx_bug
)) {
3040 tp
->tx_cons
= sw_idx
;
3042 /* Need to make the tx_cons update visible to tg3_start_xmit()
3043 * before checking for netif_queue_stopped(). Without the
3044 * memory barrier, there is a small possibility that tg3_start_xmit()
3045 * will miss it and cause the queue to be stopped forever.
3049 if (unlikely(netif_queue_stopped(tp
->dev
) &&
3050 (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH
))) {
3051 netif_tx_lock(tp
->dev
);
3052 if (netif_queue_stopped(tp
->dev
) &&
3053 (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH
))
3054 netif_wake_queue(tp
->dev
);
3055 netif_tx_unlock(tp
->dev
);
3059 /* Returns size of skb allocated or < 0 on error.
3061 * We only need to fill in the address because the other members
3062 * of the RX descriptor are invariant, see tg3_init_rings.
3064 * Note the purposeful assymetry of cpu vs. chip accesses. For
3065 * posting buffers we only dirty the first cache line of the RX
3066 * descriptor (containing the address). Whereas for the RX status
3067 * buffers the cpu only reads the last cacheline of the RX descriptor
3068 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3070 static int tg3_alloc_rx_skb(struct tg3
*tp
, u32 opaque_key
,
3071 int src_idx
, u32 dest_idx_unmasked
)
3073 struct tg3_rx_buffer_desc
*desc
;
3074 struct ring_info
*map
, *src_map
;
3075 struct sk_buff
*skb
;
3077 int skb_size
, dest_idx
;
3080 switch (opaque_key
) {
3081 case RXD_OPAQUE_RING_STD
:
3082 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3083 desc
= &tp
->rx_std
[dest_idx
];
3084 map
= &tp
->rx_std_buffers
[dest_idx
];
3086 src_map
= &tp
->rx_std_buffers
[src_idx
];
3087 skb_size
= tp
->rx_pkt_buf_sz
;
3090 case RXD_OPAQUE_RING_JUMBO
:
3091 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3092 desc
= &tp
->rx_jumbo
[dest_idx
];
3093 map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3095 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3096 skb_size
= RX_JUMBO_PKT_BUF_SZ
;
3103 /* Do not overwrite any of the map or rp information
3104 * until we are sure we can commit to a new buffer.
3106 * Callers depend upon this behavior and assume that
3107 * we leave everything unchanged if we fail.
3109 skb
= netdev_alloc_skb(tp
->dev
, skb_size
);
3113 skb_reserve(skb
, tp
->rx_offset
);
3115 mapping
= pci_map_single(tp
->pdev
, skb
->data
,
3116 skb_size
- tp
->rx_offset
,
3117 PCI_DMA_FROMDEVICE
);
3120 pci_unmap_addr_set(map
, mapping
, mapping
);
3122 if (src_map
!= NULL
)
3123 src_map
->skb
= NULL
;
3125 desc
->addr_hi
= ((u64
)mapping
>> 32);
3126 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
3131 /* We only need to move over in the address because the other
3132 * members of the RX descriptor are invariant. See notes above
3133 * tg3_alloc_rx_skb for full details.
3135 static void tg3_recycle_rx(struct tg3
*tp
, u32 opaque_key
,
3136 int src_idx
, u32 dest_idx_unmasked
)
3138 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
3139 struct ring_info
*src_map
, *dest_map
;
3142 switch (opaque_key
) {
3143 case RXD_OPAQUE_RING_STD
:
3144 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3145 dest_desc
= &tp
->rx_std
[dest_idx
];
3146 dest_map
= &tp
->rx_std_buffers
[dest_idx
];
3147 src_desc
= &tp
->rx_std
[src_idx
];
3148 src_map
= &tp
->rx_std_buffers
[src_idx
];
3151 case RXD_OPAQUE_RING_JUMBO
:
3152 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3153 dest_desc
= &tp
->rx_jumbo
[dest_idx
];
3154 dest_map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3155 src_desc
= &tp
->rx_jumbo
[src_idx
];
3156 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3163 dest_map
->skb
= src_map
->skb
;
3164 pci_unmap_addr_set(dest_map
, mapping
,
3165 pci_unmap_addr(src_map
, mapping
));
3166 dest_desc
->addr_hi
= src_desc
->addr_hi
;
3167 dest_desc
->addr_lo
= src_desc
->addr_lo
;
3169 src_map
->skb
= NULL
;
3172 #if TG3_VLAN_TAG_USED
3173 static int tg3_vlan_rx(struct tg3
*tp
, struct sk_buff
*skb
, u16 vlan_tag
)
3175 return vlan_hwaccel_receive_skb(skb
, tp
->vlgrp
, vlan_tag
);
3179 /* The RX ring scheme is composed of multiple rings which post fresh
3180 * buffers to the chip, and one special ring the chip uses to report
3181 * status back to the host.
3183 * The special ring reports the status of received packets to the
3184 * host. The chip does not write into the original descriptor the
3185 * RX buffer was obtained from. The chip simply takes the original
3186 * descriptor as provided by the host, updates the status and length
3187 * field, then writes this into the next status ring entry.
3189 * Each ring the host uses to post buffers to the chip is described
3190 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3191 * it is first placed into the on-chip ram. When the packet's length
3192 * is known, it walks down the TG3_BDINFO entries to select the ring.
3193 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3194 * which is within the range of the new packet's length is chosen.
3196 * The "separate ring for rx status" scheme may sound queer, but it makes
3197 * sense from a cache coherency perspective. If only the host writes
3198 * to the buffer post rings, and only the chip writes to the rx status
3199 * rings, then cache lines never move beyond shared-modified state.
3200 * If both the host and chip were to write into the same ring, cache line
3201 * eviction could occur since both entities want it in an exclusive state.
3203 static int tg3_rx(struct tg3
*tp
, int budget
)
3205 u32 work_mask
, rx_std_posted
= 0;
3206 u32 sw_idx
= tp
->rx_rcb_ptr
;
3210 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3212 * We need to order the read of hw_idx and the read of
3213 * the opaque cookie.
3218 while (sw_idx
!= hw_idx
&& budget
> 0) {
3219 struct tg3_rx_buffer_desc
*desc
= &tp
->rx_rcb
[sw_idx
];
3221 struct sk_buff
*skb
;
3222 dma_addr_t dma_addr
;
3223 u32 opaque_key
, desc_idx
, *post_ptr
;
3225 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
3226 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
3227 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
3228 dma_addr
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
],
3230 skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
3231 post_ptr
= &tp
->rx_std_ptr
;
3233 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
3234 dma_addr
= pci_unmap_addr(&tp
->rx_jumbo_buffers
[desc_idx
],
3236 skb
= tp
->rx_jumbo_buffers
[desc_idx
].skb
;
3237 post_ptr
= &tp
->rx_jumbo_ptr
;
3240 goto next_pkt_nopost
;
3243 work_mask
|= opaque_key
;
3245 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
3246 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
3248 tg3_recycle_rx(tp
, opaque_key
,
3249 desc_idx
, *post_ptr
);
3251 /* Other statistics kept track of by card. */
3252 tp
->net_stats
.rx_dropped
++;
3256 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4; /* omit crc */
3258 if (len
> RX_COPY_THRESHOLD
3259 && tp
->rx_offset
== 2
3260 /* rx_offset != 2 iff this is a 5701 card running
3261 * in PCI-X mode [see tg3_get_invariants()] */
3265 skb_size
= tg3_alloc_rx_skb(tp
, opaque_key
,
3266 desc_idx
, *post_ptr
);
3270 pci_unmap_single(tp
->pdev
, dma_addr
,
3271 skb_size
- tp
->rx_offset
,
3272 PCI_DMA_FROMDEVICE
);
3276 struct sk_buff
*copy_skb
;
3278 tg3_recycle_rx(tp
, opaque_key
,
3279 desc_idx
, *post_ptr
);
3281 copy_skb
= netdev_alloc_skb(tp
->dev
, len
+ 2);
3282 if (copy_skb
== NULL
)
3283 goto drop_it_no_recycle
;
3285 skb_reserve(copy_skb
, 2);
3286 skb_put(copy_skb
, len
);
3287 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3288 memcpy(copy_skb
->data
, skb
->data
, len
);
3289 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3291 /* We'll reuse the original ring buffer. */
3295 if ((tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) &&
3296 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
3297 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
3298 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
3299 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3301 skb
->ip_summed
= CHECKSUM_NONE
;
3303 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
3304 #if TG3_VLAN_TAG_USED
3305 if (tp
->vlgrp
!= NULL
&&
3306 desc
->type_flags
& RXD_FLAG_VLAN
) {
3307 tg3_vlan_rx(tp
, skb
,
3308 desc
->err_vlan
& RXD_VLAN_MASK
);
3311 netif_receive_skb(skb
);
3313 tp
->dev
->last_rx
= jiffies
;
3320 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
3321 u32 idx
= *post_ptr
% TG3_RX_RING_SIZE
;
3323 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+
3324 TG3_64BIT_REG_LOW
, idx
);
3325 work_mask
&= ~RXD_OPAQUE_RING_STD
;
3330 sw_idx
%= TG3_RX_RCB_RING_SIZE(tp
);
3332 /* Refresh hw_idx to see if there is new work */
3333 if (sw_idx
== hw_idx
) {
3334 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3339 /* ACK the status ring. */
3340 tp
->rx_rcb_ptr
= sw_idx
;
3341 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, sw_idx
);
3343 /* Refill RX ring(s). */
3344 if (work_mask
& RXD_OPAQUE_RING_STD
) {
3345 sw_idx
= tp
->rx_std_ptr
% TG3_RX_RING_SIZE
;
3346 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3349 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
3350 sw_idx
= tp
->rx_jumbo_ptr
% TG3_RX_JUMBO_RING_SIZE
;
3351 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3359 static int tg3_poll(struct net_device
*netdev
, int *budget
)
3361 struct tg3
*tp
= netdev_priv(netdev
);
3362 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3365 /* handle link change and other phy events */
3366 if (!(tp
->tg3_flags
&
3367 (TG3_FLAG_USE_LINKCHG_REG
|
3368 TG3_FLAG_POLL_SERDES
))) {
3369 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
3370 sblk
->status
= SD_STATUS_UPDATED
|
3371 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
3372 spin_lock(&tp
->lock
);
3373 tg3_setup_phy(tp
, 0);
3374 spin_unlock(&tp
->lock
);
3378 /* run TX completion thread */
3379 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
) {
3381 if (unlikely(tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
)) {
3382 netif_rx_complete(netdev
);
3383 schedule_work(&tp
->reset_task
);
3388 /* run RX thread, within the bounds set by NAPI.
3389 * All RX "locking" is done by ensuring outside
3390 * code synchronizes with dev->poll()
3392 if (sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
) {
3393 int orig_budget
= *budget
;
3396 if (orig_budget
> netdev
->quota
)
3397 orig_budget
= netdev
->quota
;
3399 work_done
= tg3_rx(tp
, orig_budget
);
3401 *budget
-= work_done
;
3402 netdev
->quota
-= work_done
;
3405 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
3406 tp
->last_tag
= sblk
->status_tag
;
3409 sblk
->status
&= ~SD_STATUS_UPDATED
;
3411 /* if no more work, tell net stack and NIC we're done */
3412 done
= !tg3_has_work(tp
);
3414 netif_rx_complete(netdev
);
3415 tg3_restart_ints(tp
);
3418 return (done
? 0 : 1);
3421 static void tg3_irq_quiesce(struct tg3
*tp
)
3423 BUG_ON(tp
->irq_sync
);
3428 synchronize_irq(tp
->pdev
->irq
);
3431 static inline int tg3_irq_sync(struct tg3
*tp
)
3433 return tp
->irq_sync
;
3436 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3437 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3438 * with as well. Most of the time, this is not necessary except when
3439 * shutting down the device.
3441 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
)
3444 tg3_irq_quiesce(tp
);
3445 spin_lock_bh(&tp
->lock
);
3448 static inline void tg3_full_unlock(struct tg3
*tp
)
3450 spin_unlock_bh(&tp
->lock
);
3453 /* One-shot MSI handler - Chip automatically disables interrupt
3454 * after sending MSI so driver doesn't have to do it.
3456 static irqreturn_t
tg3_msi_1shot(int irq
, void *dev_id
, struct pt_regs
*regs
)
3458 struct net_device
*dev
= dev_id
;
3459 struct tg3
*tp
= netdev_priv(dev
);
3461 prefetch(tp
->hw_status
);
3462 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3464 if (likely(!tg3_irq_sync(tp
)))
3465 netif_rx_schedule(dev
); /* schedule NAPI poll */
3470 /* MSI ISR - No need to check for interrupt sharing and no need to
3471 * flush status block and interrupt mailbox. PCI ordering rules
3472 * guarantee that MSI will arrive after the status block.
3474 static irqreturn_t
tg3_msi(int irq
, void *dev_id
, struct pt_regs
*regs
)
3476 struct net_device
*dev
= dev_id
;
3477 struct tg3
*tp
= netdev_priv(dev
);
3479 prefetch(tp
->hw_status
);
3480 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3482 * Writing any value to intr-mbox-0 clears PCI INTA# and
3483 * chip-internal interrupt pending events.
3484 * Writing non-zero to intr-mbox-0 additional tells the
3485 * NIC to stop sending us irqs, engaging "in-intr-handler"
3488 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
3489 if (likely(!tg3_irq_sync(tp
)))
3490 netif_rx_schedule(dev
); /* schedule NAPI poll */
3492 return IRQ_RETVAL(1);
3495 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
3497 struct net_device
*dev
= dev_id
;
3498 struct tg3
*tp
= netdev_priv(dev
);
3499 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3500 unsigned int handled
= 1;
3502 /* In INTx mode, it is possible for the interrupt to arrive at
3503 * the CPU before the status block posted prior to the interrupt.
3504 * Reading the PCI State register will confirm whether the
3505 * interrupt is ours and will flush the status block.
3507 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
3508 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3510 * Writing any value to intr-mbox-0 clears PCI INTA# and
3511 * chip-internal interrupt pending events.
3512 * Writing non-zero to intr-mbox-0 additional tells the
3513 * NIC to stop sending us irqs, engaging "in-intr-handler"
3516 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3518 if (tg3_irq_sync(tp
))
3520 sblk
->status
&= ~SD_STATUS_UPDATED
;
3521 if (likely(tg3_has_work(tp
))) {
3522 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3523 netif_rx_schedule(dev
); /* schedule NAPI poll */
3525 /* No work, shared interrupt perhaps? re-enable
3526 * interrupts, and flush that PCI write
3528 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3531 } else { /* shared interrupt */
3535 return IRQ_RETVAL(handled
);
3538 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
, struct pt_regs
*regs
)
3540 struct net_device
*dev
= dev_id
;
3541 struct tg3
*tp
= netdev_priv(dev
);
3542 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3543 unsigned int handled
= 1;
3545 /* In INTx mode, it is possible for the interrupt to arrive at
3546 * the CPU before the status block posted prior to the interrupt.
3547 * Reading the PCI State register will confirm whether the
3548 * interrupt is ours and will flush the status block.
3550 if ((sblk
->status_tag
!= tp
->last_tag
) ||
3551 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3553 * writing any value to intr-mbox-0 clears PCI INTA# and
3554 * chip-internal interrupt pending events.
3555 * writing non-zero to intr-mbox-0 additional tells the
3556 * NIC to stop sending us irqs, engaging "in-intr-handler"
3559 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3561 if (tg3_irq_sync(tp
))
3563 if (netif_rx_schedule_prep(dev
)) {
3564 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3565 /* Update last_tag to mark that this status has been
3566 * seen. Because interrupt may be shared, we may be
3567 * racing with tg3_poll(), so only update last_tag
3568 * if tg3_poll() is not scheduled.
3570 tp
->last_tag
= sblk
->status_tag
;
3571 __netif_rx_schedule(dev
);
3573 } else { /* shared interrupt */
3577 return IRQ_RETVAL(handled
);
3580 /* ISR for interrupt test */
3581 static irqreturn_t
tg3_test_isr(int irq
, void *dev_id
,
3582 struct pt_regs
*regs
)
3584 struct net_device
*dev
= dev_id
;
3585 struct tg3
*tp
= netdev_priv(dev
);
3586 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3588 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
3589 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3590 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3592 return IRQ_RETVAL(1);
3594 return IRQ_RETVAL(0);
3597 static int tg3_init_hw(struct tg3
*, int);
3598 static int tg3_halt(struct tg3
*, int, int);
3600 /* Restart hardware after configuration changes, self-test, etc.
3601 * Invoked with tp->lock held.
3603 static int tg3_restart_hw(struct tg3
*tp
, int reset_phy
)
3607 err
= tg3_init_hw(tp
, reset_phy
);
3609 printk(KERN_ERR PFX
"%s: Failed to re-initialize device, "
3610 "aborting.\n", tp
->dev
->name
);
3611 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
3612 tg3_full_unlock(tp
);
3613 del_timer_sync(&tp
->timer
);
3615 netif_poll_enable(tp
->dev
);
3617 tg3_full_lock(tp
, 0);
3622 #ifdef CONFIG_NET_POLL_CONTROLLER
3623 static void tg3_poll_controller(struct net_device
*dev
)
3625 struct tg3
*tp
= netdev_priv(dev
);
3627 tg3_interrupt(tp
->pdev
->irq
, dev
, NULL
);
3631 static void tg3_reset_task(void *_data
)
3633 struct tg3
*tp
= _data
;
3634 unsigned int restart_timer
;
3636 tg3_full_lock(tp
, 0);
3637 tp
->tg3_flags
|= TG3_FLAG_IN_RESET_TASK
;
3639 if (!netif_running(tp
->dev
)) {
3640 tp
->tg3_flags
&= ~TG3_FLAG_IN_RESET_TASK
;
3641 tg3_full_unlock(tp
);
3645 tg3_full_unlock(tp
);
3649 tg3_full_lock(tp
, 1);
3651 restart_timer
= tp
->tg3_flags2
& TG3_FLG2_RESTART_TIMER
;
3652 tp
->tg3_flags2
&= ~TG3_FLG2_RESTART_TIMER
;
3654 if (tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
) {
3655 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
3656 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
3657 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
3658 tp
->tg3_flags
&= ~TG3_FLAG_TX_RECOVERY_PENDING
;
3661 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
3662 if (tg3_init_hw(tp
, 1))
3665 tg3_netif_start(tp
);
3668 mod_timer(&tp
->timer
, jiffies
+ 1);
3671 tp
->tg3_flags
&= ~TG3_FLAG_IN_RESET_TASK
;
3673 tg3_full_unlock(tp
);
3676 static void tg3_tx_timeout(struct net_device
*dev
)
3678 struct tg3
*tp
= netdev_priv(dev
);
3680 printk(KERN_ERR PFX
"%s: transmit timed out, resetting\n",
3683 schedule_work(&tp
->reset_task
);
3686 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3687 static inline int tg3_4g_overflow_test(dma_addr_t mapping
, int len
)
3689 u32 base
= (u32
) mapping
& 0xffffffff;
3691 return ((base
> 0xffffdcc0) &&
3692 (base
+ len
+ 8 < base
));
3695 /* Test for DMA addresses > 40-bit */
3696 static inline int tg3_40bit_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
3699 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3700 if (tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
)
3701 return (((u64
) mapping
+ len
) > DMA_40BIT_MASK
);
3708 static void tg3_set_txd(struct tg3
*, int, dma_addr_t
, int, u32
, u32
);
3710 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3711 static int tigon3_dma_hwbug_workaround(struct tg3
*tp
, struct sk_buff
*skb
,
3712 u32 last_plus_one
, u32
*start
,
3713 u32 base_flags
, u32 mss
)
3715 struct sk_buff
*new_skb
= skb_copy(skb
, GFP_ATOMIC
);
3716 dma_addr_t new_addr
= 0;
3723 /* New SKB is guaranteed to be linear. */
3725 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
3727 /* Make sure new skb does not cross any 4G boundaries.
3728 * Drop the packet if it does.
3730 if (tg3_4g_overflow_test(new_addr
, new_skb
->len
)) {
3732 dev_kfree_skb(new_skb
);
3735 tg3_set_txd(tp
, entry
, new_addr
, new_skb
->len
,
3736 base_flags
, 1 | (mss
<< 1));
3737 *start
= NEXT_TX(entry
);
3741 /* Now clean up the sw ring entries. */
3743 while (entry
!= last_plus_one
) {
3747 len
= skb_headlen(skb
);
3749 len
= skb_shinfo(skb
)->frags
[i
-1].size
;
3750 pci_unmap_single(tp
->pdev
,
3751 pci_unmap_addr(&tp
->tx_buffers
[entry
], mapping
),
3752 len
, PCI_DMA_TODEVICE
);
3754 tp
->tx_buffers
[entry
].skb
= new_skb
;
3755 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, new_addr
);
3757 tp
->tx_buffers
[entry
].skb
= NULL
;
3759 entry
= NEXT_TX(entry
);
3768 static void tg3_set_txd(struct tg3
*tp
, int entry
,
3769 dma_addr_t mapping
, int len
, u32 flags
,
3772 struct tg3_tx_buffer_desc
*txd
= &tp
->tx_ring
[entry
];
3773 int is_end
= (mss_and_is_end
& 0x1);
3774 u32 mss
= (mss_and_is_end
>> 1);
3778 flags
|= TXD_FLAG_END
;
3779 if (flags
& TXD_FLAG_VLAN
) {
3780 vlan_tag
= flags
>> 16;
3783 vlan_tag
|= (mss
<< TXD_MSS_SHIFT
);
3785 txd
->addr_hi
= ((u64
) mapping
>> 32);
3786 txd
->addr_lo
= ((u64
) mapping
& 0xffffffff);
3787 txd
->len_flags
= (len
<< TXD_LEN_SHIFT
) | flags
;
3788 txd
->vlan_tag
= vlan_tag
<< TXD_VLAN_TAG_SHIFT
;
3791 /* hard_start_xmit for devices that don't have any bugs and
3792 * support TG3_FLG2_HW_TSO_2 only.
3794 static int tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3796 struct tg3
*tp
= netdev_priv(dev
);
3798 u32 len
, entry
, base_flags
, mss
;
3800 len
= skb_headlen(skb
);
3802 /* We are running in BH disabled context with netif_tx_lock
3803 * and TX reclaim runs via tp->poll inside of a software
3804 * interrupt. Furthermore, IRQ processing runs lockless so we have
3805 * no IRQ context deadlocks to worry about either. Rejoice!
3807 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
3808 if (!netif_queue_stopped(dev
)) {
3809 netif_stop_queue(dev
);
3811 /* This is a hard error, log it. */
3812 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
3813 "queue awake!\n", dev
->name
);
3815 return NETDEV_TX_BUSY
;
3818 entry
= tp
->tx_prod
;
3820 #if TG3_TSO_SUPPORT != 0
3822 if (skb
->len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
3823 (mss
= skb_shinfo(skb
)->gso_size
) != 0) {
3824 int tcp_opt_len
, ip_tcp_len
;
3826 if (skb_header_cloned(skb
) &&
3827 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
3832 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
)
3833 mss
|= (skb_headlen(skb
) - ETH_HLEN
) << 9;
3835 tcp_opt_len
= ((skb
->h
.th
->doff
- 5) * 4);
3836 ip_tcp_len
= (skb
->nh
.iph
->ihl
* 4) +
3837 sizeof(struct tcphdr
);
3839 skb
->nh
.iph
->check
= 0;
3840 skb
->nh
.iph
->tot_len
= htons(mss
+ ip_tcp_len
+
3842 mss
|= (ip_tcp_len
+ tcp_opt_len
) << 9;
3845 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
3846 TXD_FLAG_CPU_POST_DMA
);
3848 skb
->h
.th
->check
= 0;
3851 else if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
3852 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
3855 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
3856 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
3858 #if TG3_VLAN_TAG_USED
3859 if (tp
->vlgrp
!= NULL
&& vlan_tx_tag_present(skb
))
3860 base_flags
|= (TXD_FLAG_VLAN
|
3861 (vlan_tx_tag_get(skb
) << 16));
3864 /* Queue skb data, a.k.a. the main skb fragment. */
3865 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
3867 tp
->tx_buffers
[entry
].skb
= skb
;
3868 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
3870 tg3_set_txd(tp
, entry
, mapping
, len
, base_flags
,
3871 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
3873 entry
= NEXT_TX(entry
);
3875 /* Now loop through additional data fragments, and queue them. */
3876 if (skb_shinfo(skb
)->nr_frags
> 0) {
3877 unsigned int i
, last
;
3879 last
= skb_shinfo(skb
)->nr_frags
- 1;
3880 for (i
= 0; i
<= last
; i
++) {
3881 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3884 mapping
= pci_map_page(tp
->pdev
,
3887 len
, PCI_DMA_TODEVICE
);
3889 tp
->tx_buffers
[entry
].skb
= NULL
;
3890 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
3892 tg3_set_txd(tp
, entry
, mapping
, len
,
3893 base_flags
, (i
== last
) | (mss
<< 1));
3895 entry
= NEXT_TX(entry
);
3899 /* Packets are ready, update Tx producer idx local and on card. */
3900 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
), entry
);
3902 tp
->tx_prod
= entry
;
3903 if (unlikely(tg3_tx_avail(tp
) <= (MAX_SKB_FRAGS
+ 1))) {
3904 netif_stop_queue(dev
);
3905 if (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH
)
3906 netif_wake_queue(tp
->dev
);
3912 dev
->trans_start
= jiffies
;
3914 return NETDEV_TX_OK
;
3917 #if TG3_TSO_SUPPORT != 0
3918 static int tg3_start_xmit_dma_bug(struct sk_buff
*, struct net_device
*);
3920 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3921 * TSO header is greater than 80 bytes.
3923 static int tg3_tso_bug(struct tg3
*tp
, struct sk_buff
*skb
)
3925 struct sk_buff
*segs
, *nskb
;
3927 /* Estimate the number of fragments in the worst case */
3928 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->gso_segs
* 3))) {
3929 netif_stop_queue(tp
->dev
);
3930 return NETDEV_TX_BUSY
;
3933 segs
= skb_gso_segment(skb
, tp
->dev
->features
& ~NETIF_F_TSO
);
3934 if (unlikely(IS_ERR(segs
)))
3935 goto tg3_tso_bug_end
;
3941 tg3_start_xmit_dma_bug(nskb
, tp
->dev
);
3947 return NETDEV_TX_OK
;
3951 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3952 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3954 static int tg3_start_xmit_dma_bug(struct sk_buff
*skb
, struct net_device
*dev
)
3956 struct tg3
*tp
= netdev_priv(dev
);
3958 u32 len
, entry
, base_flags
, mss
;
3959 int would_hit_hwbug
;
3961 len
= skb_headlen(skb
);
3963 /* We are running in BH disabled context with netif_tx_lock
3964 * and TX reclaim runs via tp->poll inside of a software
3965 * interrupt. Furthermore, IRQ processing runs lockless so we have
3966 * no IRQ context deadlocks to worry about either. Rejoice!
3968 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
3969 if (!netif_queue_stopped(dev
)) {
3970 netif_stop_queue(dev
);
3972 /* This is a hard error, log it. */
3973 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
3974 "queue awake!\n", dev
->name
);
3976 return NETDEV_TX_BUSY
;
3979 entry
= tp
->tx_prod
;
3981 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
3982 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
3983 #if TG3_TSO_SUPPORT != 0
3985 if (skb
->len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
3986 (mss
= skb_shinfo(skb
)->gso_size
) != 0) {
3987 int tcp_opt_len
, ip_tcp_len
, hdr_len
;
3989 if (skb_header_cloned(skb
) &&
3990 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
3995 tcp_opt_len
= ((skb
->h
.th
->doff
- 5) * 4);
3996 ip_tcp_len
= (skb
->nh
.iph
->ihl
* 4) + sizeof(struct tcphdr
);
3998 hdr_len
= ip_tcp_len
+ tcp_opt_len
;
3999 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
4000 (tp
->tg3_flags2
& TG3_FLG2_HW_TSO_1_BUG
))
4001 return (tg3_tso_bug(tp
, skb
));
4003 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
4004 TXD_FLAG_CPU_POST_DMA
);
4006 skb
->nh
.iph
->check
= 0;
4007 skb
->nh
.iph
->tot_len
= htons(mss
+ hdr_len
);
4008 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
4009 skb
->h
.th
->check
= 0;
4010 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
4014 ~csum_tcpudp_magic(skb
->nh
.iph
->saddr
,
4019 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) ||
4020 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
)) {
4021 if (tcp_opt_len
|| skb
->nh
.iph
->ihl
> 5) {
4024 tsflags
= ((skb
->nh
.iph
->ihl
- 5) +
4025 (tcp_opt_len
>> 2));
4026 mss
|= (tsflags
<< 11);
4029 if (tcp_opt_len
|| skb
->nh
.iph
->ihl
> 5) {
4032 tsflags
= ((skb
->nh
.iph
->ihl
- 5) +
4033 (tcp_opt_len
>> 2));
4034 base_flags
|= tsflags
<< 12;
4041 #if TG3_VLAN_TAG_USED
4042 if (tp
->vlgrp
!= NULL
&& vlan_tx_tag_present(skb
))
4043 base_flags
|= (TXD_FLAG_VLAN
|
4044 (vlan_tx_tag_get(skb
) << 16));
4047 /* Queue skb data, a.k.a. the main skb fragment. */
4048 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
4050 tp
->tx_buffers
[entry
].skb
= skb
;
4051 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
4053 would_hit_hwbug
= 0;
4055 if (tg3_4g_overflow_test(mapping
, len
))
4056 would_hit_hwbug
= 1;
4058 tg3_set_txd(tp
, entry
, mapping
, len
, base_flags
,
4059 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
4061 entry
= NEXT_TX(entry
);
4063 /* Now loop through additional data fragments, and queue them. */
4064 if (skb_shinfo(skb
)->nr_frags
> 0) {
4065 unsigned int i
, last
;
4067 last
= skb_shinfo(skb
)->nr_frags
- 1;
4068 for (i
= 0; i
<= last
; i
++) {
4069 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4072 mapping
= pci_map_page(tp
->pdev
,
4075 len
, PCI_DMA_TODEVICE
);
4077 tp
->tx_buffers
[entry
].skb
= NULL
;
4078 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
4080 if (tg3_4g_overflow_test(mapping
, len
))
4081 would_hit_hwbug
= 1;
4083 if (tg3_40bit_overflow_test(tp
, mapping
, len
))
4084 would_hit_hwbug
= 1;
4086 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
4087 tg3_set_txd(tp
, entry
, mapping
, len
,
4088 base_flags
, (i
== last
)|(mss
<< 1));
4090 tg3_set_txd(tp
, entry
, mapping
, len
,
4091 base_flags
, (i
== last
));
4093 entry
= NEXT_TX(entry
);
4097 if (would_hit_hwbug
) {
4098 u32 last_plus_one
= entry
;
4101 start
= entry
- 1 - skb_shinfo(skb
)->nr_frags
;
4102 start
&= (TG3_TX_RING_SIZE
- 1);
4104 /* If the workaround fails due to memory/mapping
4105 * failure, silently drop this packet.
4107 if (tigon3_dma_hwbug_workaround(tp
, skb
, last_plus_one
,
4108 &start
, base_flags
, mss
))
4114 /* Packets are ready, update Tx producer idx local and on card. */
4115 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
), entry
);
4117 tp
->tx_prod
= entry
;
4118 if (unlikely(tg3_tx_avail(tp
) <= (MAX_SKB_FRAGS
+ 1))) {
4119 netif_stop_queue(dev
);
4120 if (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH
)
4121 netif_wake_queue(tp
->dev
);
4127 dev
->trans_start
= jiffies
;
4129 return NETDEV_TX_OK
;
4132 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
4137 if (new_mtu
> ETH_DATA_LEN
) {
4138 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
4139 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
4140 ethtool_op_set_tso(dev
, 0);
4143 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
4145 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
4146 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
4147 tp
->tg3_flags
&= ~TG3_FLAG_JUMBO_RING_ENABLE
;
4151 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
4153 struct tg3
*tp
= netdev_priv(dev
);
4156 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
4159 if (!netif_running(dev
)) {
4160 /* We'll just catch it later when the
4163 tg3_set_mtu(dev
, tp
, new_mtu
);
4169 tg3_full_lock(tp
, 1);
4171 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
4173 tg3_set_mtu(dev
, tp
, new_mtu
);
4175 err
= tg3_restart_hw(tp
, 0);
4178 tg3_netif_start(tp
);
4180 tg3_full_unlock(tp
);
4185 /* Free up pending packets in all rx/tx rings.
4187 * The chip has been shut down and the driver detached from
4188 * the networking, so no interrupts or new tx packets will
4189 * end up in the driver. tp->{tx,}lock is not held and we are not
4190 * in an interrupt context and thus may sleep.
4192 static void tg3_free_rings(struct tg3
*tp
)
4194 struct ring_info
*rxp
;
4197 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
4198 rxp
= &tp
->rx_std_buffers
[i
];
4200 if (rxp
->skb
== NULL
)
4202 pci_unmap_single(tp
->pdev
,
4203 pci_unmap_addr(rxp
, mapping
),
4204 tp
->rx_pkt_buf_sz
- tp
->rx_offset
,
4205 PCI_DMA_FROMDEVICE
);
4206 dev_kfree_skb_any(rxp
->skb
);
4210 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
4211 rxp
= &tp
->rx_jumbo_buffers
[i
];
4213 if (rxp
->skb
== NULL
)
4215 pci_unmap_single(tp
->pdev
,
4216 pci_unmap_addr(rxp
, mapping
),
4217 RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
,
4218 PCI_DMA_FROMDEVICE
);
4219 dev_kfree_skb_any(rxp
->skb
);
4223 for (i
= 0; i
< TG3_TX_RING_SIZE
; ) {
4224 struct tx_ring_info
*txp
;
4225 struct sk_buff
*skb
;
4228 txp
= &tp
->tx_buffers
[i
];
4236 pci_unmap_single(tp
->pdev
,
4237 pci_unmap_addr(txp
, mapping
),
4244 for (j
= 0; j
< skb_shinfo(skb
)->nr_frags
; j
++) {
4245 txp
= &tp
->tx_buffers
[i
& (TG3_TX_RING_SIZE
- 1)];
4246 pci_unmap_page(tp
->pdev
,
4247 pci_unmap_addr(txp
, mapping
),
4248 skb_shinfo(skb
)->frags
[j
].size
,
4253 dev_kfree_skb_any(skb
);
4257 /* Initialize tx/rx rings for packet processing.
4259 * The chip has been shut down and the driver detached from
4260 * the networking, so no interrupts or new tx packets will
4261 * end up in the driver. tp->{tx,}lock are held and thus
4264 static int tg3_init_rings(struct tg3
*tp
)
4268 /* Free up all the SKBs. */
4271 /* Zero out all descriptors. */
4272 memset(tp
->rx_std
, 0, TG3_RX_RING_BYTES
);
4273 memset(tp
->rx_jumbo
, 0, TG3_RX_JUMBO_RING_BYTES
);
4274 memset(tp
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
4275 memset(tp
->tx_ring
, 0, TG3_TX_RING_BYTES
);
4277 tp
->rx_pkt_buf_sz
= RX_PKT_BUF_SZ
;
4278 if ((tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) &&
4279 (tp
->dev
->mtu
> ETH_DATA_LEN
))
4280 tp
->rx_pkt_buf_sz
= RX_JUMBO_PKT_BUF_SZ
;
4282 /* Initialize invariants of the rings, we only set this
4283 * stuff once. This works because the card does not
4284 * write into the rx buffer posting rings.
4286 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
4287 struct tg3_rx_buffer_desc
*rxd
;
4289 rxd
= &tp
->rx_std
[i
];
4290 rxd
->idx_len
= (tp
->rx_pkt_buf_sz
- tp
->rx_offset
- 64)
4292 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
4293 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
4294 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
4297 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
4298 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
4299 struct tg3_rx_buffer_desc
*rxd
;
4301 rxd
= &tp
->rx_jumbo
[i
];
4302 rxd
->idx_len
= (RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
- 64)
4304 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
4306 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
4307 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
4311 /* Now allocate fresh SKBs for each rx ring. */
4312 for (i
= 0; i
< tp
->rx_pending
; i
++) {
4313 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_STD
, -1, i
) < 0) {
4314 printk(KERN_WARNING PFX
4315 "%s: Using a smaller RX standard ring, "
4316 "only %d out of %d buffers were allocated "
4318 tp
->dev
->name
, i
, tp
->rx_pending
);
4326 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
4327 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
4328 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_JUMBO
,
4330 printk(KERN_WARNING PFX
4331 "%s: Using a smaller RX jumbo ring, "
4332 "only %d out of %d buffers were "
4333 "allocated successfully.\n",
4334 tp
->dev
->name
, i
, tp
->rx_jumbo_pending
);
4339 tp
->rx_jumbo_pending
= i
;
4348 * Must not be invoked with interrupt sources disabled and
4349 * the hardware shutdown down.
4351 static void tg3_free_consistent(struct tg3
*tp
)
4353 kfree(tp
->rx_std_buffers
);
4354 tp
->rx_std_buffers
= NULL
;
4356 pci_free_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4357 tp
->rx_std
, tp
->rx_std_mapping
);
4361 pci_free_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4362 tp
->rx_jumbo
, tp
->rx_jumbo_mapping
);
4363 tp
->rx_jumbo
= NULL
;
4366 pci_free_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4367 tp
->rx_rcb
, tp
->rx_rcb_mapping
);
4371 pci_free_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4372 tp
->tx_ring
, tp
->tx_desc_mapping
);
4375 if (tp
->hw_status
) {
4376 pci_free_consistent(tp
->pdev
, TG3_HW_STATUS_SIZE
,
4377 tp
->hw_status
, tp
->status_mapping
);
4378 tp
->hw_status
= NULL
;
4381 pci_free_consistent(tp
->pdev
, sizeof(struct tg3_hw_stats
),
4382 tp
->hw_stats
, tp
->stats_mapping
);
4383 tp
->hw_stats
= NULL
;
4388 * Must not be invoked with interrupt sources disabled and
4389 * the hardware shutdown down. Can sleep.
4391 static int tg3_alloc_consistent(struct tg3
*tp
)
4393 tp
->rx_std_buffers
= kmalloc((sizeof(struct ring_info
) *
4395 TG3_RX_JUMBO_RING_SIZE
)) +
4396 (sizeof(struct tx_ring_info
) *
4399 if (!tp
->rx_std_buffers
)
4402 memset(tp
->rx_std_buffers
, 0,
4403 (sizeof(struct ring_info
) *
4405 TG3_RX_JUMBO_RING_SIZE
)) +
4406 (sizeof(struct tx_ring_info
) *
4409 tp
->rx_jumbo_buffers
= &tp
->rx_std_buffers
[TG3_RX_RING_SIZE
];
4410 tp
->tx_buffers
= (struct tx_ring_info
*)
4411 &tp
->rx_jumbo_buffers
[TG3_RX_JUMBO_RING_SIZE
];
4413 tp
->rx_std
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4414 &tp
->rx_std_mapping
);
4418 tp
->rx_jumbo
= pci_alloc_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4419 &tp
->rx_jumbo_mapping
);
4424 tp
->rx_rcb
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4425 &tp
->rx_rcb_mapping
);
4429 tp
->tx_ring
= pci_alloc_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4430 &tp
->tx_desc_mapping
);
4434 tp
->hw_status
= pci_alloc_consistent(tp
->pdev
,
4436 &tp
->status_mapping
);
4440 tp
->hw_stats
= pci_alloc_consistent(tp
->pdev
,
4441 sizeof(struct tg3_hw_stats
),
4442 &tp
->stats_mapping
);
4446 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4447 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4452 tg3_free_consistent(tp
);
4456 #define MAX_WAIT_CNT 1000
4458 /* To stop a block, clear the enable bit and poll till it
4459 * clears. tp->lock is held.
4461 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
4466 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
4473 /* We can't enable/disable these bits of the
4474 * 5705/5750, just say success.
4487 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4490 if ((val
& enable_bit
) == 0)
4494 if (i
== MAX_WAIT_CNT
&& !silent
) {
4495 printk(KERN_ERR PFX
"tg3_stop_block timed out, "
4496 "ofs=%lx enable_bit=%x\n",
4504 /* tp->lock is held. */
4505 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
4509 tg3_disable_ints(tp
);
4511 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
4512 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
4515 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
4516 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
4517 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
4518 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
4519 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
4520 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
4522 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
4523 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
4524 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
4525 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
4526 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
4527 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
4528 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
4530 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
4531 tw32_f(MAC_MODE
, tp
->mac_mode
);
4534 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
4535 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
4537 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4539 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
4542 if (i
>= MAX_WAIT_CNT
) {
4543 printk(KERN_ERR PFX
"tg3_abort_hw timed out for %s, "
4544 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4545 tp
->dev
->name
, tr32(MAC_TX_MODE
));
4549 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
4550 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
4551 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
4553 tw32(FTQ_RESET
, 0xffffffff);
4554 tw32(FTQ_RESET
, 0x00000000);
4556 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
4557 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
4560 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4562 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4567 /* tp->lock is held. */
4568 static int tg3_nvram_lock(struct tg3
*tp
)
4570 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
4573 if (tp
->nvram_lock_cnt
== 0) {
4574 tw32(NVRAM_SWARB
, SWARB_REQ_SET1
);
4575 for (i
= 0; i
< 8000; i
++) {
4576 if (tr32(NVRAM_SWARB
) & SWARB_GNT1
)
4581 tw32(NVRAM_SWARB
, SWARB_REQ_CLR1
);
4585 tp
->nvram_lock_cnt
++;
4590 /* tp->lock is held. */
4591 static void tg3_nvram_unlock(struct tg3
*tp
)
4593 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
4594 if (tp
->nvram_lock_cnt
> 0)
4595 tp
->nvram_lock_cnt
--;
4596 if (tp
->nvram_lock_cnt
== 0)
4597 tw32_f(NVRAM_SWARB
, SWARB_REQ_CLR1
);
4601 /* tp->lock is held. */
4602 static void tg3_enable_nvram_access(struct tg3
*tp
)
4604 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
4605 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
4606 u32 nvaccess
= tr32(NVRAM_ACCESS
);
4608 tw32(NVRAM_ACCESS
, nvaccess
| ACCESS_ENABLE
);
4612 /* tp->lock is held. */
4613 static void tg3_disable_nvram_access(struct tg3
*tp
)
4615 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
4616 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
4617 u32 nvaccess
= tr32(NVRAM_ACCESS
);
4619 tw32(NVRAM_ACCESS
, nvaccess
& ~ACCESS_ENABLE
);
4623 /* tp->lock is held. */
4624 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
4626 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
4627 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
4629 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
4631 case RESET_KIND_INIT
:
4632 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4636 case RESET_KIND_SHUTDOWN
:
4637 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4641 case RESET_KIND_SUSPEND
:
4642 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4652 /* tp->lock is held. */
4653 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
4655 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
4657 case RESET_KIND_INIT
:
4658 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4659 DRV_STATE_START_DONE
);
4662 case RESET_KIND_SHUTDOWN
:
4663 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4664 DRV_STATE_UNLOAD_DONE
);
4673 /* tp->lock is held. */
4674 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
4676 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
4678 case RESET_KIND_INIT
:
4679 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4683 case RESET_KIND_SHUTDOWN
:
4684 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4688 case RESET_KIND_SUSPEND
:
4689 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4699 static int tg3_poll_fw(struct tg3
*tp
)
4704 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
4705 for (i
= 0; i
< 400; i
++) {
4706 if (tr32(VCPU_STATUS
) & VCPU_STATUS_INIT_DONE
)
4713 /* Wait for firmware initialization to complete. */
4714 for (i
= 0; i
< 100000; i
++) {
4715 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
4716 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
4721 /* Chip might not be fitted with firmware. Some Sun onboard
4722 * parts are configured like that. So don't signal the timeout
4723 * of the above loop as an error, but do report the lack of
4724 * running firmware once.
4727 !(tp
->tg3_flags2
& TG3_FLG2_NO_FWARE_REPORTED
)) {
4728 tp
->tg3_flags2
|= TG3_FLG2_NO_FWARE_REPORTED
;
4730 printk(KERN_INFO PFX
"%s: No firmware running.\n",
4737 static void tg3_stop_fw(struct tg3
*);
4739 /* tp->lock is held. */
4740 static int tg3_chip_reset(struct tg3
*tp
)
4743 void (*write_op
)(struct tg3
*, u32
, u32
);
4748 /* No matching tg3_nvram_unlock() after this because
4749 * chip reset below will undo the nvram lock.
4751 tp
->nvram_lock_cnt
= 0;
4753 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
4754 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
4755 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
4756 tw32(GRC_FASTBOOT_PC
, 0);
4759 * We must avoid the readl() that normally takes place.
4760 * It locks machines, causes machine checks, and other
4761 * fun things. So, temporarily disable the 5701
4762 * hardware workaround, while we do the reset.
4764 write_op
= tp
->write32
;
4765 if (write_op
== tg3_write_flush_reg32
)
4766 tp
->write32
= tg3_write32
;
4769 val
= GRC_MISC_CFG_CORECLK_RESET
;
4771 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
4772 if (tr32(0x7e2c) == 0x60) {
4775 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
4776 tw32(GRC_MISC_CFG
, (1 << 29));
4781 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
4782 tw32(VCPU_STATUS
, tr32(VCPU_STATUS
) | VCPU_STATUS_DRV_RESET
);
4783 tw32(GRC_VCPU_EXT_CTRL
,
4784 tr32(GRC_VCPU_EXT_CTRL
) & ~GRC_VCPU_EXT_CTRL_HALT_CPU
);
4787 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
4788 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
4789 tw32(GRC_MISC_CFG
, val
);
4791 /* restore 5701 hardware bug workaround write method */
4792 tp
->write32
= write_op
;
4794 /* Unfortunately, we have to delay before the PCI read back.
4795 * Some 575X chips even will not respond to a PCI cfg access
4796 * when the reset command is given to the chip.
4798 * How do these hardware designers expect things to work
4799 * properly if the PCI write is posted for a long period
4800 * of time? It is always necessary to have some method by
4801 * which a register read back can occur to push the write
4802 * out which does the reset.
4804 * For most tg3 variants the trick below was working.
4809 /* Flush PCI posted writes. The normal MMIO registers
4810 * are inaccessible at this time so this is the only
4811 * way to make this reliably (actually, this is no longer
4812 * the case, see above). I tried to use indirect
4813 * register read/write but this upset some 5701 variants.
4815 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
4819 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
4820 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
4824 /* Wait for link training to complete. */
4825 for (i
= 0; i
< 5000; i
++)
4828 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
4829 pci_write_config_dword(tp
->pdev
, 0xc4,
4830 cfg_val
| (1 << 15));
4832 /* Set PCIE max payload size and clear error status. */
4833 pci_write_config_dword(tp
->pdev
, 0xd8, 0xf5000);
4836 /* Re-enable indirect register accesses. */
4837 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
4838 tp
->misc_host_ctrl
);
4840 /* Set MAX PCI retry to zero. */
4841 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
4842 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
4843 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
))
4844 val
|= PCISTATE_RETRY_SAME_DMA
;
4845 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
4847 pci_restore_state(tp
->pdev
);
4849 /* Make sure PCI-X relaxed ordering bit is clear. */
4850 pci_read_config_dword(tp
->pdev
, TG3PCI_X_CAPS
, &val
);
4851 val
&= ~PCIX_CAPS_RELAXED_ORDERING
;
4852 pci_write_config_dword(tp
->pdev
, TG3PCI_X_CAPS
, val
);
4854 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
4857 /* Chip reset on 5780 will reset MSI enable bit,
4858 * so need to restore it.
4860 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
4863 pci_read_config_word(tp
->pdev
,
4864 tp
->msi_cap
+ PCI_MSI_FLAGS
,
4866 pci_write_config_word(tp
->pdev
,
4867 tp
->msi_cap
+ PCI_MSI_FLAGS
,
4868 ctrl
| PCI_MSI_FLAGS_ENABLE
);
4869 val
= tr32(MSGINT_MODE
);
4870 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
4873 val
= tr32(MEMARB_MODE
);
4874 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
4877 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
4879 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
4881 tw32(0x5000, 0x400);
4884 tw32(GRC_MODE
, tp
->grc_mode
);
4886 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
4887 u32 val
= tr32(0xc4);
4889 tw32(0xc4, val
| (1 << 15));
4892 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
4893 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
4894 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
4895 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
4896 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
4897 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
4900 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
4901 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
4902 tw32_f(MAC_MODE
, tp
->mac_mode
);
4903 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
4904 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4905 tw32_f(MAC_MODE
, tp
->mac_mode
);
4907 tw32_f(MAC_MODE
, 0);
4910 err
= tg3_poll_fw(tp
);
4914 if ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
4915 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
4916 u32 val
= tr32(0x7c00);
4918 tw32(0x7c00, val
| (1 << 25));
4921 /* Reprobe ASF enable state. */
4922 tp
->tg3_flags
&= ~TG3_FLAG_ENABLE_ASF
;
4923 tp
->tg3_flags2
&= ~TG3_FLG2_ASF_NEW_HANDSHAKE
;
4924 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
4925 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
4928 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
4929 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
4930 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
4931 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
4932 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
4939 /* tp->lock is held. */
4940 static void tg3_stop_fw(struct tg3
*tp
)
4942 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
4946 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
4947 val
= tr32(GRC_RX_CPU_EVENT
);
4949 tw32(GRC_RX_CPU_EVENT
, val
);
4951 /* Wait for RX cpu to ACK the event. */
4952 for (i
= 0; i
< 100; i
++) {
4953 if (!(tr32(GRC_RX_CPU_EVENT
) & (1 << 14)))
4960 /* tp->lock is held. */
4961 static int tg3_halt(struct tg3
*tp
, int kind
, int silent
)
4967 tg3_write_sig_pre_reset(tp
, kind
);
4969 tg3_abort_hw(tp
, silent
);
4970 err
= tg3_chip_reset(tp
);
4972 tg3_write_sig_legacy(tp
, kind
);
4973 tg3_write_sig_post_reset(tp
, kind
);
4981 #define TG3_FW_RELEASE_MAJOR 0x0
4982 #define TG3_FW_RELASE_MINOR 0x0
4983 #define TG3_FW_RELEASE_FIX 0x0
4984 #define TG3_FW_START_ADDR 0x08000000
4985 #define TG3_FW_TEXT_ADDR 0x08000000
4986 #define TG3_FW_TEXT_LEN 0x9c0
4987 #define TG3_FW_RODATA_ADDR 0x080009c0
4988 #define TG3_FW_RODATA_LEN 0x60
4989 #define TG3_FW_DATA_ADDR 0x08000a40
4990 #define TG3_FW_DATA_LEN 0x20
4991 #define TG3_FW_SBSS_ADDR 0x08000a60
4992 #define TG3_FW_SBSS_LEN 0xc
4993 #define TG3_FW_BSS_ADDR 0x08000a70
4994 #define TG3_FW_BSS_LEN 0x10
/* .text section of the 5701 A0 bug-fix firmware: raw MIPS machine code
 * words, written to the RX CPU scratch memory by
 * tg3_load_firmware_cpu().  Generated data -- do not edit by hand.
 * Zero runs are explicit padding so the jump table and later routines
 * sit at fixed offsets within the image.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	/* 54 words of pad above place this jump table at byte offset 0x300 */
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
/* .rodata section of the 5701 A0 fix-up firmware: ASCII event-name
 * strings packed into words (e.g. "Main", "CpuB", "fatalErr"), loaded
 * at TG3_FW_RODATA_ADDR.  Generated data -- do not edit by hand.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
/* .data section is all zeros; kept compiled out -- the loader writes
 * zeros for a NULL data_data pointer (see tg3_load_firmware_cpu).
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
/* On-chip scratch memory windows used to stage firmware for the
 * embedded RX and TX CPUs (16 KB each).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
/* tp->lock is held. */
/* Halt the embedded CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE) so firmware can be loaded into its scratch memory.
 * Returns 0 on success, -ENODEV if the CPU never reports halted.
 * 5705+ parts have no TX CPU, hence the BUG_ON.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 uses a dedicated VCPU control register instead of
		 * the CPU_STATE/CPU_MODE pair.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Poll until the CPU acknowledges the halt request. */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* Re-assert halt with a posted-write flush; the RX CPU
		 * needs a final settle delay (reconstructed from the
		 * canonical driver -- confirm against full source).
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
/* Describes one firmware image for tg3_load_firmware_cpu(): per-section
 * load address (in CPU address space), length in bytes, and a pointer
 * to the word data.  A NULL data pointer means "write zeros".
 */
struct fw_info {
	unsigned int text_base;		/* .text load address */
	unsigned int text_len;		/* .text length, bytes */
	const u32 *text_data;		/* .text words, or NULL */
	unsigned int rodata_base;	/* .rodata load address */
	unsigned int rodata_len;	/* .rodata length, bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL */
	unsigned int data_base;		/* .data load address */
	unsigned int data_len;		/* .data length, bytes */
	const u32 *data_data;		/* .data words, or NULL */
};
/* tp->lock is held. */
/* Halt the CPU at @cpu_base, zero its scratch memory, then copy the
 * firmware sections described by @info into the scratch window.  Each
 * section is placed at (scratch base + low 16 bits of its CPU load
 * address); a NULL section data pointer writes zeros.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705.\n",
		       tp->dev->name);
		return -EINVAL;
	}

	/* 5705+ parts must use direct memory writes; older chips go
	 * through the indirect register window.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Clear the whole scratch window before loading sections. */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->text_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->text_data ?
			  info->text_data[i] : 0));
	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->rodata_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->rodata_data ?
			  info->rodata_data[i] : 0));
	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->data_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->data_data ?
			  info->data_data[i] : 0));

	err = 0;

out:
	return err;
}
/* tp->lock is held. */
/* Load the 5701 A0 bug-fix firmware (tg3FwText/tg3FwRodata) into both
 * the RX and TX CPU scratch areas, then start only the RX CPU and wait
 * for its program counter to reach the firmware entry point.
 * Returns 0 on success, -ENODEV if the PC never latches, or the error
 * from tg3_load_firmware_cpu().
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	info.data_data = NULL;		/* .data is all zeros */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	/* Retry a few times: re-halt and re-set the PC until it reads
	 * back as the firmware entry point.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the RX CPU from halt; it begins executing the fix. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
#if TG3_TSO_SUPPORT != 0

/* Layout of the TSO offload firmware image (v1.6.0) for chips with a
 * dedicated TSO CPU.  NOTE: "RELASE" spelling is historical and is
 * preserved because the macro name is referenced as-is.
 */
#define TG3_TSO_FW_RELEASE_MAJOR	0x1
#define TG3_TSO_FW_RELASE_MINOR		0x6
#define TG3_TSO_FW_RELEASE_FIX		0x0
#define TG3_TSO_FW_START_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_ADDR		0x08000000
#define TG3_TSO_FW_TEXT_LEN		0x1aa0
#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
#define TG3_TSO_FW_RODATA_LEN		0x60
#define TG3_TSO_FW_DATA_ADDR		0x08001b20
#define TG3_TSO_FW_DATA_LEN		0x30
#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
#define TG3_TSO_FW_SBSS_LEN		0x2c
#define TG3_TSO_FW_BSS_ADDR		0x08001b80
#define TG3_TSO_FW_BSS_LEN		0x894
/* .text section of the TSO firmware: raw MIPS machine code words,
 * loaded at TG3_TSO_FW_TEXT_ADDR by tg3_load_tso_firmware().
 * Generated data -- do not edit by hand.
 */
static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
	0x00000000
};
5586 static const u32 tg3TsoFwRodata
[] = {
5587 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5588 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5589 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5590 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5594 static const u32 tg3TsoFwData
[] = {
5595 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5596 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* 5705 needs a special version of the TSO firmware. */
/* Version and NIC-SRAM layout (per-section load address and byte length)
 * of the TSO5 image.
 * NOTE(review): "RELASE" is a historical typo for "RELEASE"; the macro
 * name is kept as-is because it may be referenced elsewhere in the file.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
5616 static const u32 tg3Tso5FwText
[(TG3_TSO5_FW_TEXT_LEN
/ 4) + 1] = {
5617 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
5618 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
5619 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5620 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
5621 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
5622 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
5623 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5624 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
5625 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
5626 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
5627 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
5628 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
5629 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
5630 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
5631 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
5632 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
5633 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
5634 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
5635 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
5636 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
5637 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
5638 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
5639 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
5640 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
5641 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
5642 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
5643 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
5644 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
5645 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
5646 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
5647 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5648 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
5649 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
5650 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
5651 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
5652 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
5653 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
5654 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
5655 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
5656 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
5657 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
5658 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
5659 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
5660 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
5661 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
5662 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
5663 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
5664 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
5665 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
5666 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
5667 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
5668 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
5669 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
5670 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
5671 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
5672 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
5673 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
5674 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
5675 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
5676 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
5677 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
5678 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
5679 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
5680 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
5681 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
5682 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
5683 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
5684 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
5685 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
5686 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
5687 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
5688 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
5689 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
5690 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
5691 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
5692 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
5693 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
5694 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
5695 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
5696 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
5697 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
5698 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
5699 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
5700 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
5701 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
5702 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
5703 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
5704 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
5705 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
5706 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
5707 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
5708 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
5709 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
5710 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
5711 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
5712 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
5713 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
5714 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
5715 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
5716 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
5717 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
5718 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
5719 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
5720 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
5721 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
5722 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
5723 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5724 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5725 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
5726 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
5727 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
5728 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
5729 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
5730 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
5731 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
5732 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
5733 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
5734 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
5735 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
5736 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
5737 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
5738 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
5739 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
5740 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5741 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
5742 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
5743 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
5744 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
5745 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
5746 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
5747 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
5748 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
5749 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
5750 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
5751 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
5752 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
5753 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
5754 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
5755 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
5756 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
5757 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
5758 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
5759 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
5760 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
5761 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
5762 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
5763 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
5764 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5765 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
5766 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
5767 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
5768 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5769 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
5770 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
5771 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5772 0x00000000, 0x00000000, 0x00000000,
5775 static const u32 tg3Tso5FwRodata
[(TG3_TSO5_FW_RODATA_LEN
/ 4) + 1] = {
5776 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5777 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
5778 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5779 0x00000000, 0x00000000, 0x00000000,
5782 static const u32 tg3Tso5FwData
[(TG3_TSO5_FW_DATA_LEN
/ 4) + 1] = {
5783 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
5784 0x00000000, 0x00000000, 0x00000000,
5787 /* tp->lock is held. */
5788 static int tg3_load_tso_firmware(struct tg3
*tp
)
5790 struct fw_info info
;
5791 unsigned long cpu_base
, cpu_scratch_base
, cpu_scratch_size
;
5794 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
5797 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
5798 info
.text_base
= TG3_TSO5_FW_TEXT_ADDR
;
5799 info
.text_len
= TG3_TSO5_FW_TEXT_LEN
;
5800 info
.text_data
= &tg3Tso5FwText
[0];
5801 info
.rodata_base
= TG3_TSO5_FW_RODATA_ADDR
;
5802 info
.rodata_len
= TG3_TSO5_FW_RODATA_LEN
;
5803 info
.rodata_data
= &tg3Tso5FwRodata
[0];
5804 info
.data_base
= TG3_TSO5_FW_DATA_ADDR
;
5805 info
.data_len
= TG3_TSO5_FW_DATA_LEN
;
5806 info
.data_data
= &tg3Tso5FwData
[0];
5807 cpu_base
= RX_CPU_BASE
;
5808 cpu_scratch_base
= NIC_SRAM_MBUF_POOL_BASE5705
;
5809 cpu_scratch_size
= (info
.text_len
+
5812 TG3_TSO5_FW_SBSS_LEN
+
5813 TG3_TSO5_FW_BSS_LEN
);
5815 info
.text_base
= TG3_TSO_FW_TEXT_ADDR
;
5816 info
.text_len
= TG3_TSO_FW_TEXT_LEN
;
5817 info
.text_data
= &tg3TsoFwText
[0];
5818 info
.rodata_base
= TG3_TSO_FW_RODATA_ADDR
;
5819 info
.rodata_len
= TG3_TSO_FW_RODATA_LEN
;
5820 info
.rodata_data
= &tg3TsoFwRodata
[0];
5821 info
.data_base
= TG3_TSO_FW_DATA_ADDR
;
5822 info
.data_len
= TG3_TSO_FW_DATA_LEN
;
5823 info
.data_data
= &tg3TsoFwData
[0];
5824 cpu_base
= TX_CPU_BASE
;
5825 cpu_scratch_base
= TX_CPU_SCRATCH_BASE
;
5826 cpu_scratch_size
= TX_CPU_SCRATCH_SIZE
;
5829 err
= tg3_load_firmware_cpu(tp
, cpu_base
,
5830 cpu_scratch_base
, cpu_scratch_size
,
5835 /* Now startup the cpu. */
5836 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5837 tw32_f(cpu_base
+ CPU_PC
, info
.text_base
);
5839 for (i
= 0; i
< 5; i
++) {
5840 if (tr32(cpu_base
+ CPU_PC
) == info
.text_base
)
5842 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5843 tw32(cpu_base
+ CPU_MODE
, CPU_MODE_HALT
);
5844 tw32_f(cpu_base
+ CPU_PC
, info
.text_base
);
5848 printk(KERN_ERR PFX
"tg3_load_tso_firmware fails for %s "
5849 "to set CPU PC, is %08x should be %08x\n",
5850 tp
->dev
->name
, tr32(cpu_base
+ CPU_PC
),
5854 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5855 tw32_f(cpu_base
+ CPU_MODE
, 0x00000000);
5859 #endif /* TG3_TSO_SUPPORT != 0 */
5861 /* tp->lock is held. */
5862 static void __tg3_set_mac_addr(struct tg3
*tp
)
5864 u32 addr_high
, addr_low
;
5867 addr_high
= ((tp
->dev
->dev_addr
[0] << 8) |
5868 tp
->dev
->dev_addr
[1]);
5869 addr_low
= ((tp
->dev
->dev_addr
[2] << 24) |
5870 (tp
->dev
->dev_addr
[3] << 16) |
5871 (tp
->dev
->dev_addr
[4] << 8) |
5872 (tp
->dev
->dev_addr
[5] << 0));
5873 for (i
= 0; i
< 4; i
++) {
5874 tw32(MAC_ADDR_0_HIGH
+ (i
* 8), addr_high
);
5875 tw32(MAC_ADDR_0_LOW
+ (i
* 8), addr_low
);
5878 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
5879 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
5880 for (i
= 0; i
< 12; i
++) {
5881 tw32(MAC_EXTADDR_0_HIGH
+ (i
* 8), addr_high
);
5882 tw32(MAC_EXTADDR_0_LOW
+ (i
* 8), addr_low
);
5886 addr_high
= (tp
->dev
->dev_addr
[0] +
5887 tp
->dev
->dev_addr
[1] +
5888 tp
->dev
->dev_addr
[2] +
5889 tp
->dev
->dev_addr
[3] +
5890 tp
->dev
->dev_addr
[4] +
5891 tp
->dev
->dev_addr
[5]) &
5892 TX_BACKOFF_SEED_MASK
;
5893 tw32(MAC_TX_BACKOFF_SEED
, addr_high
);
5896 static int tg3_set_mac_addr(struct net_device
*dev
, void *p
)
5898 struct tg3
*tp
= netdev_priv(dev
);
5899 struct sockaddr
*addr
= p
;
5902 if (!is_valid_ether_addr(addr
->sa_data
))
5905 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
5907 if (!netif_running(dev
))
5910 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
5911 /* Reset chip so that ASF can re-init any MAC addresses it
5915 tg3_full_lock(tp
, 1);
5917 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
5918 err
= tg3_restart_hw(tp
, 0);
5920 tg3_netif_start(tp
);
5921 tg3_full_unlock(tp
);
5923 spin_lock_bh(&tp
->lock
);
5924 __tg3_set_mac_addr(tp
);
5925 spin_unlock_bh(&tp
->lock
);
5931 /* tp->lock is held. */
5932 static void tg3_set_bdinfo(struct tg3
*tp
, u32 bdinfo_addr
,
5933 dma_addr_t mapping
, u32 maxlen_flags
,
5937 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
5938 ((u64
) mapping
>> 32));
5940 (bdinfo_addr
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
),
5941 ((u64
) mapping
& 0xffffffff));
5943 (bdinfo_addr
+ TG3_BDINFO_MAXLEN_FLAGS
),
5946 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
5948 (bdinfo_addr
+ TG3_BDINFO_NIC_ADDR
),
5952 static void __tg3_set_rx_mode(struct net_device
*);
5953 static void __tg3_set_coalesce(struct tg3
*tp
, struct ethtool_coalesce
*ec
)
5955 tw32(HOSTCC_RXCOL_TICKS
, ec
->rx_coalesce_usecs
);
5956 tw32(HOSTCC_TXCOL_TICKS
, ec
->tx_coalesce_usecs
);
5957 tw32(HOSTCC_RXMAX_FRAMES
, ec
->rx_max_coalesced_frames
);
5958 tw32(HOSTCC_TXMAX_FRAMES
, ec
->tx_max_coalesced_frames
);
5959 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5960 tw32(HOSTCC_RXCOAL_TICK_INT
, ec
->rx_coalesce_usecs_irq
);
5961 tw32(HOSTCC_TXCOAL_TICK_INT
, ec
->tx_coalesce_usecs_irq
);
5963 tw32(HOSTCC_RXCOAL_MAXF_INT
, ec
->rx_max_coalesced_frames_irq
);
5964 tw32(HOSTCC_TXCOAL_MAXF_INT
, ec
->tx_max_coalesced_frames_irq
);
5965 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5966 u32 val
= ec
->stats_block_coalesce_usecs
;
5968 if (!netif_carrier_ok(tp
->dev
))
5971 tw32(HOSTCC_STAT_COAL_TICKS
, val
);
5975 /* tp->lock is held. */
5976 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
5978 u32 val
, rdmac_mode
;
5981 tg3_disable_ints(tp
);
5985 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
5987 if (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) {
5988 tg3_abort_hw(tp
, 1);
5991 if ((tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) && reset_phy
)
5994 err
= tg3_chip_reset(tp
);
5998 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
6000 /* This works around an issue with Athlon chipsets on
6001 * B3 tigon3 silicon. This bit has no effect on any
6002 * other revision. But do not set this on PCI Express
6005 if (!(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
6006 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
6007 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
6009 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
6010 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
6011 val
= tr32(TG3PCI_PCISTATE
);
6012 val
|= PCISTATE_RETRY_SAME_DMA
;
6013 tw32(TG3PCI_PCISTATE
, val
);
6016 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
6017 /* Enable some hw fixes. */
6018 val
= tr32(TG3PCI_MSI_DATA
);
6019 val
|= (1 << 26) | (1 << 28) | (1 << 29);
6020 tw32(TG3PCI_MSI_DATA
, val
);
6023 /* Descriptor ring init may make accesses to the
6024 * NIC SRAM area to setup the TX descriptors, so we
6025 * can only do this after the hardware has been
6026 * successfully reset.
6028 err
= tg3_init_rings(tp
);
6032 /* This value is determined during the probe time DMA
6033 * engine test, tg3_test_dma.
6035 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
6037 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
6038 GRC_MODE_4X_NIC_SEND_RINGS
|
6039 GRC_MODE_NO_TX_PHDR_CSUM
|
6040 GRC_MODE_NO_RX_PHDR_CSUM
);
6041 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
6043 /* Pseudo-header checksum is done by hardware logic and not
6044 * the offload processers, so make the chip do the pseudo-
6045 * header checksums on receive. For transmit it is more
6046 * convenient to do the pseudo-header checksum in software
6047 * as Linux does that on transmit for us in all cases.
6049 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
6053 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
6055 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6056 val
= tr32(GRC_MISC_CFG
);
6058 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
6059 tw32(GRC_MISC_CFG
, val
);
6061 /* Initialize MBUF/DESC pool. */
6062 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
6064 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
6065 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
6066 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
6067 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
6069 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
6070 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
6071 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
6073 #if TG3_TSO_SUPPORT != 0
6074 else if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
6077 fw_len
= (TG3_TSO5_FW_TEXT_LEN
+
6078 TG3_TSO5_FW_RODATA_LEN
+
6079 TG3_TSO5_FW_DATA_LEN
+
6080 TG3_TSO5_FW_SBSS_LEN
+
6081 TG3_TSO5_FW_BSS_LEN
);
6082 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
6083 tw32(BUFMGR_MB_POOL_ADDR
,
6084 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
6085 tw32(BUFMGR_MB_POOL_SIZE
,
6086 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
6090 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
6091 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
6092 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
6093 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
6094 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
6095 tw32(BUFMGR_MB_HIGH_WATER
,
6096 tp
->bufmgr_config
.mbuf_high_water
);
6098 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
6099 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
6100 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
6101 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
6102 tw32(BUFMGR_MB_HIGH_WATER
,
6103 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
6105 tw32(BUFMGR_DMA_LOW_WATER
,
6106 tp
->bufmgr_config
.dma_low_water
);
6107 tw32(BUFMGR_DMA_HIGH_WATER
,
6108 tp
->bufmgr_config
.dma_high_water
);
6110 tw32(BUFMGR_MODE
, BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
);
6111 for (i
= 0; i
< 2000; i
++) {
6112 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
6117 printk(KERN_ERR PFX
"tg3_reset_hw cannot enable BUFMGR for %s.\n",
6122 /* Setup replenish threshold. */
6123 val
= tp
->rx_pending
/ 8;
6126 else if (val
> tp
->rx_std_max_post
)
6127 val
= tp
->rx_std_max_post
;
6128 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
6129 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5906_A1
)
6130 tw32(ISO_PKT_TX
, (tr32(ISO_PKT_TX
) & ~0x3) | 0x2);
6132 if (val
> (TG3_RX_INTERNAL_RING_SZ_5906
/ 2))
6133 val
= TG3_RX_INTERNAL_RING_SZ_5906
/ 2;
6136 tw32(RCVBDI_STD_THRESH
, val
);
6138 /* Initialize TG3_BDINFO's at:
6139 * RCVDBDI_STD_BD: standard eth size rx ring
6140 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6141 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6144 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6145 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6146 * ring attribute flags
6147 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6149 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6150 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6152 * The size of each ring is fixed in the firmware, but the location is
6155 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6156 ((u64
) tp
->rx_std_mapping
>> 32));
6157 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6158 ((u64
) tp
->rx_std_mapping
& 0xffffffff));
6159 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
6160 NIC_SRAM_RX_BUFFER_DESC
);
6162 /* Don't even try to program the JUMBO/MINI buffer descriptor
6165 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
6166 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6167 RX_STD_MAX_SIZE_5705
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6169 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6170 RX_STD_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6172 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6173 BDINFO_FLAGS_DISABLED
);
6175 /* Setup replenish threshold. */
6176 tw32(RCVBDI_JUMBO_THRESH
, tp
->rx_jumbo_pending
/ 8);
6178 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
6179 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6180 ((u64
) tp
->rx_jumbo_mapping
>> 32));
6181 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6182 ((u64
) tp
->rx_jumbo_mapping
& 0xffffffff));
6183 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6184 RX_JUMBO_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6185 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
6186 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
6188 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6189 BDINFO_FLAGS_DISABLED
);
6194 /* There is only one send ring on 5705/5750, no need to explicitly
6195 * disable the others.
6197 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6198 /* Clear out send RCB ring in SRAM. */
6199 for (i
= NIC_SRAM_SEND_RCB
; i
< NIC_SRAM_RCV_RET_RCB
; i
+= TG3_BDINFO_SIZE
)
6200 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
6201 BDINFO_FLAGS_DISABLED
);
6206 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6207 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6209 tg3_set_bdinfo(tp
, NIC_SRAM_SEND_RCB
,
6210 tp
->tx_desc_mapping
,
6211 (TG3_TX_RING_SIZE
<<
6212 BDINFO_FLAGS_MAXLEN_SHIFT
),
6213 NIC_SRAM_TX_BUFFER_DESC
);
6215 /* There is only one receive return ring on 5705/5750, no need
6216 * to explicitly disable the others.
6218 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6219 for (i
= NIC_SRAM_RCV_RET_RCB
; i
< NIC_SRAM_STATS_BLK
;
6220 i
+= TG3_BDINFO_SIZE
) {
6221 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
6222 BDINFO_FLAGS_DISABLED
);
6227 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6229 tg3_set_bdinfo(tp
, NIC_SRAM_RCV_RET_RCB
,
6231 (TG3_RX_RCB_RING_SIZE(tp
) <<
6232 BDINFO_FLAGS_MAXLEN_SHIFT
),
6235 tp
->rx_std_ptr
= tp
->rx_pending
;
6236 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
6239 tp
->rx_jumbo_ptr
= (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) ?
6240 tp
->rx_jumbo_pending
: 0;
6241 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
6244 /* Initialize MAC address and backoff seed. */
6245 __tg3_set_mac_addr(tp
);
6247 /* MTU + ethernet header + FCS + optional VLAN tag */
6248 tw32(MAC_RX_MTU_SIZE
, tp
->dev
->mtu
+ ETH_HLEN
+ 8);
6250 /* The slot time is changed by tg3_setup_phy if we
6251 * run at gigabit with half duplex.
6253 tw32(MAC_TX_LENGTHS
,
6254 (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
6255 (6 << TX_LENGTHS_IPG_SHIFT
) |
6256 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
6258 /* Receive rules. */
6259 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
6260 tw32(RCVLPC_CONFIG
, 0x0181);
6262 /* Calculate RDMAC_MODE setting early, we need it to determine
6263 * the RCVLPC_STATE_ENABLE mask.
6265 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
6266 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
6267 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
6268 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
6269 RDMAC_MODE_LNGREAD_ENAB
);
6270 if (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
)
6271 rdmac_mode
|= RDMAC_MODE_SPLIT_ENABLE
;
6273 /* If statement applies to 5705 and 5750 PCI devices only */
6274 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
6275 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
6276 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)) {
6277 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
&&
6278 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
6279 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
6280 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
6281 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
6282 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
)) {
6283 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
6287 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)
6288 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
6290 #if TG3_TSO_SUPPORT != 0
6291 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6292 rdmac_mode
|= (1 << 27);
6295 /* Receive/send statistics. */
6296 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
6297 val
= tr32(RCVLPC_STATS_ENABLE
);
6298 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
6299 tw32(RCVLPC_STATS_ENABLE
, val
);
6300 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
6301 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
6302 val
= tr32(RCVLPC_STATS_ENABLE
);
6303 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
6304 tw32(RCVLPC_STATS_ENABLE
, val
);
6306 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
6308 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
6309 tw32(SNDDATAI_STATSENAB
, 0xffffff);
6310 tw32(SNDDATAI_STATSCTRL
,
6311 (SNDDATAI_SCTRL_ENABLE
|
6312 SNDDATAI_SCTRL_FASTUPD
));
6314 /* Setup host coalescing engine. */
6315 tw32(HOSTCC_MODE
, 0);
6316 for (i
= 0; i
< 2000; i
++) {
6317 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
6322 __tg3_set_coalesce(tp
, &tp
->coal
);
6324 /* set status block DMA address */
6325 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6326 ((u64
) tp
->status_mapping
>> 32));
6327 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6328 ((u64
) tp
->status_mapping
& 0xffffffff));
6330 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6331 /* Status/statistics block address. See tg3_timer,
6332 * the tg3_periodic_fetch_stats call there, and
6333 * tg3_get_stats to see how this works for 5705/5750 chips.
6335 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6336 ((u64
) tp
->stats_mapping
>> 32));
6337 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6338 ((u64
) tp
->stats_mapping
& 0xffffffff));
6339 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
6340 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
6343 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
6345 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
6346 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
6347 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
6348 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
6350 /* Clear statistics/status block in chip, and status block in ram. */
6351 for (i
= NIC_SRAM_STATS_BLK
;
6352 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
6354 tg3_write_mem(tp
, i
, 0);
6357 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6359 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
6360 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
6361 /* reset to prevent losing 1st rx packet intermittently */
6362 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
6366 tp
->mac_mode
= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
6367 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
| MAC_MODE_FHDE_ENABLE
;
6368 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
6371 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6372 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6373 * register to preserve the GPIO settings for LOMs. The GPIOs,
6374 * whether used as inputs or outputs, are set by boot code after
6377 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
6380 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE2
|
6381 GRC_LCLCTRL_GPIO_OUTPUT0
| GRC_LCLCTRL_GPIO_OUTPUT2
;
6383 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
6384 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
6385 GRC_LCLCTRL_GPIO_OUTPUT3
;
6387 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
6388 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
6390 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
6392 /* GPIO1 must be driven high for eeprom write protect */
6393 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
6394 GRC_LCLCTRL_GPIO_OUTPUT1
);
6396 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
6399 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0);
6402 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6403 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
6407 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
6408 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
6409 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
6410 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
6411 WDMAC_MODE_LNGREAD_ENAB
);
6413 /* If statement applies to 5705 and 5750 PCI devices only */
6414 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
6415 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
6416 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) {
6417 if ((tp
->tg3_flags
& TG3_FLG2_TSO_CAPABLE
) &&
6418 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
6419 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
6421 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
6422 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
6423 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
6424 val
|= WDMAC_MODE_RX_ACCEL
;
6428 /* Enable host coalescing bug fix */
6429 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
) ||
6430 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
))
6433 tw32_f(WDMAC_MODE
, val
);
6436 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0) {
6437 val
= tr32(TG3PCI_X_CAPS
);
6438 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
6439 val
&= ~PCIX_CAPS_BURST_MASK
;
6440 val
|= (PCIX_CAPS_MAX_BURST_CPIOB
<< PCIX_CAPS_BURST_SHIFT
);
6441 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
6442 val
&= ~(PCIX_CAPS_SPLIT_MASK
| PCIX_CAPS_BURST_MASK
);
6443 val
|= (PCIX_CAPS_MAX_BURST_CPIOB
<< PCIX_CAPS_BURST_SHIFT
);
6444 if (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
)
6445 val
|= (tp
->split_mode_max_reqs
<<
6446 PCIX_CAPS_SPLIT_SHIFT
);
6448 tw32(TG3PCI_X_CAPS
, val
);
6451 tw32_f(RDMAC_MODE
, rdmac_mode
);
6454 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
6455 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
6456 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
6457 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
6458 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
6459 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
6460 tw32(RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
);
6461 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
6462 #if TG3_TSO_SUPPORT != 0
6463 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6464 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
6466 tw32(SNDBDI_MODE
, SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
);
6467 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
6469 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
6470 err
= tg3_load_5701_a0_firmware_fix(tp
);
6475 #if TG3_TSO_SUPPORT != 0
6476 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
6477 err
= tg3_load_tso_firmware(tp
);
6483 tp
->tx_mode
= TX_MODE_ENABLE
;
6484 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
6487 tp
->rx_mode
= RX_MODE_ENABLE
;
6488 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
6489 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
6491 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6494 if (tp
->link_config
.phy_is_low_power
) {
6495 tp
->link_config
.phy_is_low_power
= 0;
6496 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
6497 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
6498 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
6501 tp
->mi_mode
= MAC_MI_MODE_BASE
;
6502 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
6505 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
6507 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
6508 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
6509 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
6512 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6515 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
6516 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
6517 !(tp
->tg3_flags2
& TG3_FLG2_SERDES_PREEMPHASIS
)) {
6518 /* Set drive transmission level to 1.2V */
6519 /* only if the signal pre-emphasis bit is not set */
6520 val
= tr32(MAC_SERDES_CFG
);
6523 tw32(MAC_SERDES_CFG
, val
);
6525 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
6526 tw32(MAC_SERDES_CFG
, 0x616000);
6529 /* Prevent chip from dropping frames when flow control
6532 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, 2);
6534 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
6535 (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
6536 /* Use hardware link auto-negotiation */
6537 tp
->tg3_flags2
|= TG3_FLG2_HW_AUTONEG
;
6540 if ((tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) &&
6541 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
6544 tmp
= tr32(SERDES_RX_CTRL
);
6545 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
6546 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
6547 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
6548 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
6551 err
= tg3_setup_phy(tp
, reset_phy
);
6555 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
6558 /* Clear CRC stats. */
6559 if (!tg3_readphy(tp
, 0x1e, &tmp
)) {
6560 tg3_writephy(tp
, 0x1e, tmp
| 0x8000);
6561 tg3_readphy(tp
, 0x14, &tmp
);
6565 __tg3_set_rx_mode(tp
->dev
);
6567 /* Initialize receive rules. */
6568 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
6569 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
6570 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
6571 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
6573 if ((tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
6574 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
6578 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)
6582 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
6584 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
6586 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
6588 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
6590 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
6592 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
6594 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
6596 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
6598 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
6600 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
6602 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
6604 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
6606 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6608 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6616 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
/*
 * tg3_init_hw() - bring the chip to a runnable state at device open time.
 * Visible steps: force the device into D0 power state, switch core clocks,
 * zero the PCI memory-window base register, then run the full hardware
 * reset/init sequence via tg3_reset_hw() (passing through reset_phy).
 * NOTE(review): this extract is lossy - the error-return checks after each
 * call and the function's closing lines are missing between the numbered
 * statements below; confirm against the full driver source.
 */
6621 /* Called at device open time to get the chip ready for
6622 * packet processing. Invoked with tp->lock held.
6624 static int tg3_init_hw(struct tg3
*tp
, int reset_phy
)
6628 /* Force the chip into D0. */
6629 err
= tg3_set_power_state(tp
, PCI_D0
);
6633 tg3_switch_clocks(tp
);
6635 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
6637 err
= tg3_reset_hw(tp
, reset_phy
);
/*
 * TG3_STAT_ADD32() - accumulate a 32-bit hardware counter register (read
 * via tr32) into a 64-bit software statistic kept as a low/high pair.
 * The carry is detected by the unsigned wraparound test: if low became
 * smaller after the addition, the add overflowed, so bump high by one.
 * NOTE(review): the extract cuts off before the macro's terminating
 * "} while (0)" line - verify against the full source.
 */
6643 #define TG3_STAT_ADD32(PSTAT, REG) \
6644 do { u32 __val = tr32(REG); \
6645 (PSTAT)->low += __val; \
6646 if ((PSTAT)->low < __val) \
6647 (PSTAT)->high += 1; \
/*
 * tg3_periodic_fetch_stats() - fold the chip's clear-on-read 32-bit MAC
 * statistics registers into the 64-bit software counters in tp->hw_stats,
 * using TG3_STAT_ADD32 for each counter. Skipped while the link carrier
 * is down (nothing to accumulate and registers may be stale).
 * Called from tg3_timer() on 5705+ chips (see the caller at line 6724).
 * NOTE(review): lossy extract - the early-return body after the
 * netif_carrier_ok() test and the braces are missing here.
 */
6650 static void tg3_periodic_fetch_stats(struct tg3
*tp
)
6652 struct tg3_hw_stats
*sp
= tp
->hw_stats
;
6654 if (!netif_carrier_ok(tp
->dev
))
/* Transmit-side MAC counters. */
6657 TG3_STAT_ADD32(&sp
->tx_octets
, MAC_TX_STATS_OCTETS
);
6658 TG3_STAT_ADD32(&sp
->tx_collisions
, MAC_TX_STATS_COLLISIONS
);
6659 TG3_STAT_ADD32(&sp
->tx_xon_sent
, MAC_TX_STATS_XON_SENT
);
6660 TG3_STAT_ADD32(&sp
->tx_xoff_sent
, MAC_TX_STATS_XOFF_SENT
);
6661 TG3_STAT_ADD32(&sp
->tx_mac_errors
, MAC_TX_STATS_MAC_ERRORS
);
6662 TG3_STAT_ADD32(&sp
->tx_single_collisions
, MAC_TX_STATS_SINGLE_COLLISIONS
);
6663 TG3_STAT_ADD32(&sp
->tx_mult_collisions
, MAC_TX_STATS_MULT_COLLISIONS
);
6664 TG3_STAT_ADD32(&sp
->tx_deferred
, MAC_TX_STATS_DEFERRED
);
6665 TG3_STAT_ADD32(&sp
->tx_excessive_collisions
, MAC_TX_STATS_EXCESSIVE_COL
);
6666 TG3_STAT_ADD32(&sp
->tx_late_collisions
, MAC_TX_STATS_LATE_COL
);
6667 TG3_STAT_ADD32(&sp
->tx_ucast_packets
, MAC_TX_STATS_UCAST
);
6668 TG3_STAT_ADD32(&sp
->tx_mcast_packets
, MAC_TX_STATS_MCAST
);
6669 TG3_STAT_ADD32(&sp
->tx_bcast_packets
, MAC_TX_STATS_BCAST
);
/* Receive-side MAC counters. */
6671 TG3_STAT_ADD32(&sp
->rx_octets
, MAC_RX_STATS_OCTETS
);
6672 TG3_STAT_ADD32(&sp
->rx_fragments
, MAC_RX_STATS_FRAGMENTS
);
6673 TG3_STAT_ADD32(&sp
->rx_ucast_packets
, MAC_RX_STATS_UCAST
);
6674 TG3_STAT_ADD32(&sp
->rx_mcast_packets
, MAC_RX_STATS_MCAST
);
6675 TG3_STAT_ADD32(&sp
->rx_bcast_packets
, MAC_RX_STATS_BCAST
);
6676 TG3_STAT_ADD32(&sp
->rx_fcs_errors
, MAC_RX_STATS_FCS_ERRORS
);
6677 TG3_STAT_ADD32(&sp
->rx_align_errors
, MAC_RX_STATS_ALIGN_ERRORS
);
6678 TG3_STAT_ADD32(&sp
->rx_xon_pause_rcvd
, MAC_RX_STATS_XON_PAUSE_RECVD
);
6679 TG3_STAT_ADD32(&sp
->rx_xoff_pause_rcvd
, MAC_RX_STATS_XOFF_PAUSE_RECVD
);
6680 TG3_STAT_ADD32(&sp
->rx_mac_ctrl_rcvd
, MAC_RX_STATS_MAC_CTRL_RECVD
);
6681 TG3_STAT_ADD32(&sp
->rx_xoff_entered
, MAC_RX_STATS_XOFF_ENTERED
);
6682 TG3_STAT_ADD32(&sp
->rx_frame_too_long_errors
, MAC_RX_STATS_FRAME_TOO_LONG
);
6683 TG3_STAT_ADD32(&sp
->rx_jabbers
, MAC_RX_STATS_JABBERS
);
6684 TG3_STAT_ADD32(&sp
->rx_undersize_packets
, MAC_RX_STATS_UNDERSIZE
);
/* Receive list placement counters (BD starvation / discards / errors). */
6686 TG3_STAT_ADD32(&sp
->rxbds_empty
, RCVLPC_NO_RCV_BD_CNT
);
6687 TG3_STAT_ADD32(&sp
->rx_discards
, RCVLPC_IN_DISCARDS_CNT
);
6688 TG3_STAT_ADD32(&sp
->rx_errors
, RCVLPC_IN_ERRORS_CNT
);
/*
 * tg3_timer() - periodic driver timer, run under tp->lock (spin_lock).
 * Visible responsibilities:
 *  - non-tagged-IRQ workaround: if the status block shows SD_STATUS_UPDATED,
 *    re-assert the interrupt via GRC_LOCAL_CTRL / HOSTCC_MODE_NOW;
 *  - watchdog: if WDMAC has lost its ENABLE bit, flag a restart and
 *    schedule tp->reset_task (unlocking first, since the task re-locks);
 *  - once per second (timer_counter countdown): fetch stats on 5705+,
 *    poll link state via MAC_STATUS or serdes-specific paths, calling
 *    tg3_setup_phy() / tg3_serdes_parallel_detect() as needed;
 *  - every 2 seconds (asf_counter countdown): send the FWCMD_NICDRV_ALIVE3
 *    heartbeat to ASF firmware through the NIC_SRAM_FW_CMD mailboxes;
 *  - finally re-arm itself: expires = jiffies + tp->timer_offset.
 * NOTE(review): lossy extract - many closing braces, "goto restart_timer"
 * style lines and intermediate statements are missing between the numbered
 * lines; do not treat this fragment as the complete control flow.
 */
6691 static void tg3_timer(unsigned long __opaque
)
6693 struct tg3
*tp
= (struct tg3
*) __opaque
;
6698 spin_lock(&tp
->lock
);
6700 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)) {
6701 /* All of this garbage is because when using non-tagged
6702 * IRQ status the mailbox/status_block protocol the chip
6703 * uses with the cpu is race prone.
6705 if (tp
->hw_status
->status
& SD_STATUS_UPDATED
) {
6706 tw32(GRC_LOCAL_CTRL
,
6707 tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
6709 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
6710 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
/* WDMAC engine died: hand recovery off to the reset task. */
6713 if (!(tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
6714 tp
->tg3_flags2
|= TG3_FLG2_RESTART_TIMER
;
6715 spin_unlock(&tp
->lock
);
6716 schedule_work(&tp
->reset_task
);
6721 /* This part only runs once per second. */
6722 if (!--tp
->timer_counter
) {
6723 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
6724 tg3_periodic_fetch_stats(tp
);
6726 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
6730 mac_stat
= tr32(MAC_STATUS
);
6733 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
) {
6734 if (mac_stat
& MAC_STATUS_MI_INTERRUPT
)
6736 } else if (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)
6740 tg3_setup_phy(tp
, 0);
6741 } else if (tp
->tg3_flags
& TG3_FLAG_POLL_SERDES
) {
6742 u32 mac_stat
= tr32(MAC_STATUS
);
6745 if (netif_carrier_ok(tp
->dev
) &&
6746 (mac_stat
& MAC_STATUS_LNKSTATE_CHANGED
)) {
6749 if (! netif_carrier_ok(tp
->dev
) &&
6750 (mac_stat
& (MAC_STATUS_PCS_SYNCED
|
6751 MAC_STATUS_SIGNAL_DET
))) {
6755 if (!tp
->serdes_counter
) {
6758 ~MAC_MODE_PORT_MODE_MASK
));
6760 tw32_f(MAC_MODE
, tp
->mac_mode
);
6763 tg3_setup_phy(tp
, 0);
6765 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
6766 tg3_serdes_parallel_detect(tp
);
6768 tp
->timer_counter
= tp
->timer_multiplier
;
6771 /* Heartbeat is only sent once every 2 seconds.
6773 * The heartbeat is to tell the ASF firmware that the host
6774 * driver is still alive. In the event that the OS crashes,
6775 * ASF needs to reset the hardware to free up the FIFO space
6776 * that may be filled with rx packets destined for the host.
6777 * If the FIFO is full, ASF will no longer function properly.
6779 * Unintended resets have been reported on real time kernels
6780 * where the timer doesn't run on time. Netpoll will also have
6783 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
6784 * to check the ring condition when the heartbeat is expiring
6785 * before doing the reset. This will prevent most unintended
6788 if (!--tp
->asf_counter
) {
6789 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
6792 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
,
6793 FWCMD_NICDRV_ALIVE3
);
6794 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_LEN_MBOX
, 4);
6795 /* 5 seconds timeout */
6796 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_DATA_MBOX
, 5);
6797 val
= tr32(GRC_RX_CPU_EVENT
);
6799 tw32(GRC_RX_CPU_EVENT
, val
);
6801 tp
->asf_counter
= tp
->asf_multiplier
;
6804 spin_unlock(&tp
->lock
);
/* Re-arm the timer for the next tick. */
6807 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
6808 add_timer(&tp
->timer
);
/*
 * tg3_request_irq() - pick the right ISR and IRQ flags, then request the
 * device's IRQ line. Visible selection logic: under MSI, a non-shared
 * IRQF_SAMPLE_RANDOM request (1-shot MSI handled specially); otherwise a
 * shared line, with tg3_interrupt_tagged used when the chip supports
 * tagged status. Returns the request_irq() result.
 * NOTE(review): lossy extract - the handler assignments for the MSI and
 * default (non-tagged) paths are missing between the numbered lines.
 */
6811 static int tg3_request_irq(struct tg3
*tp
)
6813 irqreturn_t (*fn
)(int, void *, struct pt_regs
*);
6814 unsigned long flags
;
6815 struct net_device
*dev
= tp
->dev
;
6817 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6819 if (tp
->tg3_flags2
& TG3_FLG2_1SHOT_MSI
)
6821 flags
= IRQF_SAMPLE_RANDOM
;
6824 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
6825 fn
= tg3_interrupt_tagged
;
6826 flags
= IRQF_SHARED
| IRQF_SAMPLE_RANDOM
;
6828 return (request_irq(tp
->pdev
->irq
, fn
, flags
, dev
->name
, dev
));
/*
 * tg3_test_interrupt() - verify the device can actually deliver an
 * interrupt. Visible sequence: bail if the interface is not running;
 * disable chip ints and free the production IRQ; install the test ISR
 * (tg3_test_isr); clear SD_STATUS_UPDATED, re-enable ints and force a
 * coalescing "interrupt now" via HOSTCC_MODE; poll the interrupt mailbox
 * up to 5 times; then tear down and restore the normal handler with
 * tg3_request_irq().
 * NOTE(review): lossy extract - the success test on int_mbox, delays in
 * the poll loop and the final return are missing from this fragment.
 */
6831 static int tg3_test_interrupt(struct tg3
*tp
)
6833 struct net_device
*dev
= tp
->dev
;
6837 if (!netif_running(dev
))
6840 tg3_disable_ints(tp
);
6842 free_irq(tp
->pdev
->irq
, dev
);
6844 err
= request_irq(tp
->pdev
->irq
, tg3_test_isr
,
6845 IRQF_SHARED
| IRQF_SAMPLE_RANDOM
, dev
->name
, dev
);
6849 tp
->hw_status
->status
&= ~SD_STATUS_UPDATED
;
6850 tg3_enable_ints(tp
);
6852 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
6855 for (i
= 0; i
< 5; i
++) {
6856 int_mbox
= tr32_mailbox(MAILBOX_INTERRUPT_0
+
6863 tg3_disable_ints(tp
);
6865 free_irq(tp
->pdev
->irq
, dev
);
6867 err
= tg3_request_irq(tp
);
/*
 * tg3_test_msi() - functional test of MSI delivery, with automatic
 * fallback to INTx. Visible sequence: no-op unless MSI is in use; mask
 * SERR in PCI_COMMAND for the duration of the test (an aborted MSI cycle
 * could otherwise raise SERR); run tg3_test_interrupt(); restore
 * PCI_COMMAND. On failure: warn the user, free the IRQ, disable MSI,
 * clear TG3_FLG2_USING_MSI, re-request the legacy IRQ, and reset the
 * chip (tg3_halt + tg3_init_hw under tg3_full_lock) because the MSI
 * cycle may have ended in Master Abort.
 * NOTE(review): lossy extract - early returns for the success path and
 * the error handling after the re-init are missing between the lines.
 */
6878 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
6879 * successfully restored
6881 static int tg3_test_msi(struct tg3
*tp
)
6883 struct net_device
*dev
= tp
->dev
;
6887 if (!(tp
->tg3_flags2
& TG3_FLG2_USING_MSI
))
6890 /* Turn off SERR reporting in case MSI terminates with Master
6893 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
6894 pci_write_config_word(tp
->pdev
, PCI_COMMAND
,
6895 pci_cmd
& ~PCI_COMMAND_SERR
);
6897 err
= tg3_test_interrupt(tp
);
6899 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
6904 /* other failures */
6908 /* MSI test failed, go back to INTx mode */
6909 printk(KERN_WARNING PFX
"%s: No interrupt was generated using MSI, "
6910 "switching to INTx mode. Please report this failure to "
6911 "the PCI maintainer and include system chipset information.\n",
6914 free_irq(tp
->pdev
->irq
, dev
);
6915 pci_disable_msi(tp
->pdev
);
6917 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6919 err
= tg3_request_irq(tp
);
6923 /* Need to reset the chip because the MSI cycle may have terminated
6924 * with Master Abort.
6926 tg3_full_lock(tp
, 1);
6928 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
6929 err
= tg3_init_hw(tp
, 1);
6931 tg3_full_unlock(tp
);
6934 free_irq(tp
->pdev
->irq
, dev
);
/*
 * tg3_open() - net_device open entry point. Visible flow: power up to D0
 * and clear INIT_COMPLETE under the full lock; allocate DMA-consistent
 * rings/status blocks; opt into MSI on eligible chips (5750+, not
 * 5750 AX/BX, not dual-port 5714) provided tagged status is supported;
 * request the IRQ; init the hardware (tg3_init_hw, halting on failure);
 * set up the 1s/100ms driver timer and the 2x ASF heartbeat multiplier;
 * run the MSI self-test (tg3_test_msi) with full teardown on failure;
 * enable 1-shot MSI in PCIE_TRANSACTION_CFG when applicable; finally arm
 * the timer, set INIT_COMPLETE, enable interrupts and start the queue.
 * NOTE(review): lossy extract - error-path labels/gotos, returns and
 * several closing braces are missing between the numbered lines.
 */
6939 static int tg3_open(struct net_device
*dev
)
6941 struct tg3
*tp
= netdev_priv(dev
);
6944 tg3_full_lock(tp
, 0);
6946 err
= tg3_set_power_state(tp
, PCI_D0
);
6950 tg3_disable_ints(tp
);
6951 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
6953 tg3_full_unlock(tp
);
6955 /* The placement of this call is tied
6956 * to the setup and use of Host TX descriptors.
6958 err
= tg3_alloc_consistent(tp
);
6962 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
6963 (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5750_AX
) &&
6964 (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5750_BX
) &&
6965 !((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) &&
6966 (tp
->pdev_peer
== tp
->pdev
))) {
6967 /* All MSI supporting chips should support tagged
6968 * status. Assert that this is the case.
6970 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)) {
6971 printk(KERN_WARNING PFX
"%s: MSI without TAGGED? "
6972 "Not using MSI.\n", tp
->dev
->name
);
6973 } else if (pci_enable_msi(tp
->pdev
) == 0) {
6976 msi_mode
= tr32(MSGINT_MODE
);
6977 tw32(MSGINT_MODE
, msi_mode
| MSGINT_MODE_ENABLE
);
6978 tp
->tg3_flags2
|= TG3_FLG2_USING_MSI
;
6981 err
= tg3_request_irq(tp
);
6984 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
6985 pci_disable_msi(tp
->pdev
);
6986 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
6988 tg3_free_consistent(tp
);
6992 tg3_full_lock(tp
, 0);
6994 err
= tg3_init_hw(tp
, 1);
6996 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
/* Timer cadence: 1s ticks with tagged status, 100ms otherwise. */
6999 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
)
7000 tp
->timer_offset
= HZ
;
7002 tp
->timer_offset
= HZ
/ 10;
7004 BUG_ON(tp
->timer_offset
> HZ
);
7005 tp
->timer_counter
= tp
->timer_multiplier
=
7006 (HZ
/ tp
->timer_offset
);
7007 tp
->asf_counter
= tp
->asf_multiplier
=
7008 ((HZ
/ tp
->timer_offset
) * 2);
7010 init_timer(&tp
->timer
);
7011 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
7012 tp
->timer
.data
= (unsigned long) tp
;
7013 tp
->timer
.function
= tg3_timer
;
7016 tg3_full_unlock(tp
);
7019 free_irq(tp
->pdev
->irq
, dev
);
7020 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7021 pci_disable_msi(tp
->pdev
);
7022 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
7024 tg3_free_consistent(tp
);
7028 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7029 err
= tg3_test_msi(tp
);
7032 tg3_full_lock(tp
, 0);
7034 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7035 pci_disable_msi(tp
->pdev
);
7036 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
7038 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7040 tg3_free_consistent(tp
);
7042 tg3_full_unlock(tp
);
7047 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7048 if (tp
->tg3_flags2
& TG3_FLG2_1SHOT_MSI
) {
7049 u32 val
= tr32(PCIE_TRANSACTION_CFG
);
7051 tw32(PCIE_TRANSACTION_CFG
,
7052 val
| PCIE_TRANS_CFG_1SHOT_MSI
);
7057 tg3_full_lock(tp
, 0);
7059 add_timer(&tp
->timer
);
7060 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
7061 tg3_enable_ints(tp
);
7063 tg3_full_unlock(tp
);
7065 netif_start_queue(dev
);
/*
 * tg3_dump_state() - developer debugging aid: dump PCI config state, the
 * mode/status register of every on-chip block, host-coalescing addresses,
 * SRAM ring control blocks, the software status/statistics blocks, the
 * mailbox producer indices, and the first few NIC-side TX/RX/jumbo
 * buffer descriptors - all via printk. Read-mostly except for the
 * descriptor window reads through tp->regs.
 * NOTE(review): lossy extract - loop index declarations, some printk
 * argument lines and closing braces are missing between the numbered
 * lines below.
 */
7071 /*static*/ void tg3_dump_state(struct tg3
*tp
)
7073 u32 val32
, val32_2
, val32_3
, val32_4
, val32_5
;
7077 pci_read_config_word(tp
->pdev
, PCI_STATUS
, &val16
);
7078 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, &val32
);
7079 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7083 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7084 tr32(MAC_MODE
), tr32(MAC_STATUS
));
7085 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7086 tr32(MAC_EVENT
), tr32(MAC_LED_CTRL
));
7087 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7088 tr32(MAC_TX_MODE
), tr32(MAC_TX_STATUS
));
7089 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7090 tr32(MAC_RX_MODE
), tr32(MAC_RX_STATUS
));
7092 /* Send data initiator control block */
7093 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7094 tr32(SNDDATAI_MODE
), tr32(SNDDATAI_STATUS
));
7095 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7096 tr32(SNDDATAI_STATSCTRL
));
7098 /* Send data completion control block */
7099 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE
));
7101 /* Send BD ring selector block */
7102 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7103 tr32(SNDBDS_MODE
), tr32(SNDBDS_STATUS
));
7105 /* Send BD initiator control block */
7106 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7107 tr32(SNDBDI_MODE
), tr32(SNDBDI_STATUS
));
7109 /* Send BD completion control block */
7110 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE
));
7112 /* Receive list placement control block */
7113 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7114 tr32(RCVLPC_MODE
), tr32(RCVLPC_STATUS
));
7115 printk(" RCVLPC_STATSCTRL[%08x]\n",
7116 tr32(RCVLPC_STATSCTRL
));
7118 /* Receive data and receive BD initiator control block */
7119 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7120 tr32(RCVDBDI_MODE
), tr32(RCVDBDI_STATUS
));
7122 /* Receive data completion control block */
7123 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7126 /* Receive BD initiator control block */
7127 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7128 tr32(RCVBDI_MODE
), tr32(RCVBDI_STATUS
));
7130 /* Receive BD completion control block */
7131 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7132 tr32(RCVCC_MODE
), tr32(RCVCC_STATUS
));
7134 /* Receive list selector control block */
7135 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7136 tr32(RCVLSC_MODE
), tr32(RCVLSC_STATUS
));
7138 /* Mbuf cluster free block */
7139 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7140 tr32(MBFREE_MODE
), tr32(MBFREE_STATUS
));
7142 /* Host coalescing control block */
7143 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7144 tr32(HOSTCC_MODE
), tr32(HOSTCC_STATUS
));
7145 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7146 tr32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
7147 tr32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
));
7148 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7149 tr32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
),
7150 tr32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
));
7151 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7152 tr32(HOSTCC_STATS_BLK_NIC_ADDR
));
7153 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7154 tr32(HOSTCC_STATUS_BLK_NIC_ADDR
));
7156 /* Memory arbiter control block */
7157 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7158 tr32(MEMARB_MODE
), tr32(MEMARB_STATUS
));
7160 /* Buffer manager control block */
7161 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7162 tr32(BUFMGR_MODE
), tr32(BUFMGR_STATUS
));
7163 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7164 tr32(BUFMGR_MB_POOL_ADDR
), tr32(BUFMGR_MB_POOL_SIZE
));
7165 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7166 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7167 tr32(BUFMGR_DMA_DESC_POOL_ADDR
),
7168 tr32(BUFMGR_DMA_DESC_POOL_SIZE
));
7170 /* Read DMA control block */
7171 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7172 tr32(RDMAC_MODE
), tr32(RDMAC_STATUS
));
7174 /* Write DMA control block */
7175 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7176 tr32(WDMAC_MODE
), tr32(WDMAC_STATUS
));
7178 /* DMA completion block */
7179 printk("DEBUG: DMAC_MODE[%08x]\n",
7183 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7184 tr32(GRC_MODE
), tr32(GRC_MISC_CFG
));
7185 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7186 tr32(GRC_LOCAL_CTRL
));
7189 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7190 tr32(RCVDBDI_JUMBO_BD
+ 0x0),
7191 tr32(RCVDBDI_JUMBO_BD
+ 0x4),
7192 tr32(RCVDBDI_JUMBO_BD
+ 0x8),
7193 tr32(RCVDBDI_JUMBO_BD
+ 0xc));
7194 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7195 tr32(RCVDBDI_STD_BD
+ 0x0),
7196 tr32(RCVDBDI_STD_BD
+ 0x4),
7197 tr32(RCVDBDI_STD_BD
+ 0x8),
7198 tr32(RCVDBDI_STD_BD
+ 0xc));
7199 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7200 tr32(RCVDBDI_MINI_BD
+ 0x0),
7201 tr32(RCVDBDI_MINI_BD
+ 0x4),
7202 tr32(RCVDBDI_MINI_BD
+ 0x8),
7203 tr32(RCVDBDI_MINI_BD
+ 0xc));
7205 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x0, &val32
);
7206 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x4, &val32_2
);
7207 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0x8, &val32_3
);
7208 tg3_read_mem(tp
, NIC_SRAM_SEND_RCB
+ 0xc, &val32_4
);
7209 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7210 val32
, val32_2
, val32_3
, val32_4
);
7212 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x0, &val32
);
7213 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x4, &val32_2
);
7214 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0x8, &val32_3
);
7215 tg3_read_mem(tp
, NIC_SRAM_RCV_RET_RCB
+ 0xc, &val32_4
);
7216 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7217 val32
, val32_2
, val32_3
, val32_4
);
7219 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x0, &val32
);
7220 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x4, &val32_2
);
7221 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x8, &val32_3
);
7222 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0xc, &val32_4
);
7223 tg3_read_mem(tp
, NIC_SRAM_STATUS_BLK
+ 0x10, &val32_5
);
7224 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7225 val32
, val32_2
, val32_3
, val32_4
, val32_5
);
7227 /* SW status block */
7228 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7229 tp
->hw_status
->status
,
7230 tp
->hw_status
->status_tag
,
7231 tp
->hw_status
->rx_jumbo_consumer
,
7232 tp
->hw_status
->rx_consumer
,
7233 tp
->hw_status
->rx_mini_consumer
,
7234 tp
->hw_status
->idx
[0].rx_producer
,
7235 tp
->hw_status
->idx
[0].tx_consumer
);
7237 /* SW statistics block */
7238 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7239 ((u32
*)tp
->hw_stats
)[0],
7240 ((u32
*)tp
->hw_stats
)[1],
7241 ((u32
*)tp
->hw_stats
)[2],
7242 ((u32
*)tp
->hw_stats
)[3]);
7245 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7246 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ 0x0),
7247 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ 0x4),
7248 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0
+ 0x0),
7249 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0
+ 0x4));
7251 /* NIC side send descriptors. */
7252 for (i
= 0; i
< 6; i
++) {
7255 txd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_TX_BUFFER_DESC
7256 + (i
* sizeof(struct tg3_tx_buffer_desc
));
7257 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7259 readl(txd
+ 0x0), readl(txd
+ 0x4),
7260 readl(txd
+ 0x8), readl(txd
+ 0xc));
7263 /* NIC side RX descriptors. */
7264 for (i
= 0; i
< 6; i
++) {
7267 rxd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_RX_BUFFER_DESC
7268 + (i
* sizeof(struct tg3_rx_buffer_desc
));
7269 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7271 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
7272 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
7273 rxd
+= (4 * sizeof(u32
));
7274 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7276 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
7277 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
/* Jumbo RX descriptors. */
7280 for (i
= 0; i
< 6; i
++) {
7283 rxd
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_RX_JUMBO_BUFFER_DESC
7284 + (i
* sizeof(struct tg3_rx_buffer_desc
));
7285 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7287 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
7288 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
7289 rxd
+= (4 * sizeof(u32
));
7290 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7292 readl(rxd
+ 0x0), readl(rxd
+ 0x4),
7293 readl(rxd
+ 0x8), readl(rxd
+ 0xc));
7298 static struct net_device_stats
*tg3_get_stats(struct net_device
*);
7299 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*);
/*
 * tg3_close() - net_device stop entry point. Visible flow: wait out any
 * in-flight reset task (cannot flush_scheduled_work under rtnl_lock, see
 * comment below); stop the TX queue and the driver timer; under the full
 * lock, disable interrupts and halt the chip, clearing INIT_COMPLETE and
 * GOT_SERDES_FLOWCTL; release the IRQ and MSI; snapshot cumulative stats
 * into net_stats_prev/estats_prev BEFORE freeing the DMA-consistent
 * memory the live counters come from; power down to D3hot; drop carrier.
 * NOTE(review): lossy extract - the wait-loop body, some flag clearing
 * context around line 7326 and the return are missing.
 */
7301 static int tg3_close(struct net_device
*dev
)
7303 struct tg3
*tp
= netdev_priv(dev
);
7305 /* Calling flush_scheduled_work() may deadlock because
7306 * linkwatch_event() may be on the workqueue and it will try to get
7307 * the rtnl_lock which we are holding.
7309 while (tp
->tg3_flags
& TG3_FLAG_IN_RESET_TASK
)
7312 netif_stop_queue(dev
);
7314 del_timer_sync(&tp
->timer
);
7316 tg3_full_lock(tp
, 1);
7321 tg3_disable_ints(tp
);
7323 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
7326 ~(TG3_FLAG_INIT_COMPLETE
|
7327 TG3_FLAG_GOT_SERDES_FLOWCTL
);
7329 tg3_full_unlock(tp
);
7331 free_irq(tp
->pdev
->irq
, dev
);
7332 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
7333 pci_disable_msi(tp
->pdev
);
7334 tp
->tg3_flags2
&= ~TG3_FLG2_USING_MSI
;
/* Preserve totals across the close: the hw stats memory is freed next. */
7337 memcpy(&tp
->net_stats_prev
, tg3_get_stats(tp
->dev
),
7338 sizeof(tp
->net_stats_prev
));
7339 memcpy(&tp
->estats_prev
, tg3_get_estats(tp
),
7340 sizeof(tp
->estats_prev
));
7342 tg3_free_consistent(tp
);
7344 tg3_set_power_state(tp
, PCI_D3hot
);
7346 netif_carrier_off(tp
->dev
);
/*
 * get_stat64() - read a driver 64-bit statistic (high/low u32 pair) as an
 * unsigned long. Visible here: the BITS_PER_LONG == 32 branch combines
 * the halves into a u64. NOTE(review): lossy extract - the 64-bit branch
 * and the return statement are not visible in this fragment; presumably
 * the combined value is truncated/returned per word size - confirm
 * against the full source.
 */
7351 static inline unsigned long get_stat64(tg3_stat64_t
*val
)
7355 #if (BITS_PER_LONG == 32)
7358 ret
= ((u64
)val
->high
<< 32) | ((u64
)val
->low
);
/*
 * calc_crc_errors() - return the cumulative receive CRC error count.
 * On copper (non-SERDES) 5700/5701 chips the count lives in the PHY:
 * under tp->lock (BH-safe), write bit 0x8000 to PHY register 0x1e and
 * read the counter from register 0x14, accumulating into
 * tp->phy_crc_errors. All other chips report it via the hardware
 * statistics block (rx_fcs_errors).
 * NOTE(review): lossy extract - the local 'val' declaration/initialization
 * and some closing braces are missing between the numbered lines.
 */
7363 static unsigned long calc_crc_errors(struct tg3
*tp
)
7365 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
7367 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) &&
7368 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
7369 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
7372 spin_lock_bh(&tp
->lock
);
7373 if (!tg3_readphy(tp
, 0x1e, &val
)) {
7374 tg3_writephy(tp
, 0x1e, val
| 0x8000);
7375 tg3_readphy(tp
, 0x14, &val
);
7378 spin_unlock_bh(&tp
->lock
);
7380 tp
->phy_crc_errors
+= val
;
7382 return tp
->phy_crc_errors
;
7385 return get_stat64(&hw_stats
->rx_fcs_errors
);
/*
 * ESTAT_ADD() - helper for tg3_get_estats(): the reported ethtool value
 * for 'member' is the pre-reset snapshot (old_estats) plus the current
 * hardware counter read via get_stat64(). Relies on local variables
 * estats, old_estats and hw_stats in the expanding function.
 */
7388 #define ESTAT_ADD(member) \
7389 estats->member = old_estats->member + \
7390 get_stat64(&hw_stats->member)
/*
 * tg3_get_estats() - build the ethtool statistics snapshot in tp->estats:
 * each member is the saved pre-reset value (tp->estats_prev) plus the
 * live hardware counter from tp->hw_stats, via the ESTAT_ADD macro.
 * NOTE(review): lossy extract - any guard on hw_stats being non-NULL and
 * the final return statement are missing from this fragment.
 */
7392 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*tp
)
7394 struct tg3_ethtool_stats
*estats
= &tp
->estats
;
7395 struct tg3_ethtool_stats
*old_estats
= &tp
->estats_prev
;
7396 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
/* Receive-side counters. */
7401 ESTAT_ADD(rx_octets
);
7402 ESTAT_ADD(rx_fragments
);
7403 ESTAT_ADD(rx_ucast_packets
);
7404 ESTAT_ADD(rx_mcast_packets
);
7405 ESTAT_ADD(rx_bcast_packets
);
7406 ESTAT_ADD(rx_fcs_errors
);
7407 ESTAT_ADD(rx_align_errors
);
7408 ESTAT_ADD(rx_xon_pause_rcvd
);
7409 ESTAT_ADD(rx_xoff_pause_rcvd
);
7410 ESTAT_ADD(rx_mac_ctrl_rcvd
);
7411 ESTAT_ADD(rx_xoff_entered
);
7412 ESTAT_ADD(rx_frame_too_long_errors
);
7413 ESTAT_ADD(rx_jabbers
);
7414 ESTAT_ADD(rx_undersize_packets
);
7415 ESTAT_ADD(rx_in_length_errors
);
7416 ESTAT_ADD(rx_out_length_errors
);
7417 ESTAT_ADD(rx_64_or_less_octet_packets
);
7418 ESTAT_ADD(rx_65_to_127_octet_packets
);
7419 ESTAT_ADD(rx_128_to_255_octet_packets
);
7420 ESTAT_ADD(rx_256_to_511_octet_packets
);
7421 ESTAT_ADD(rx_512_to_1023_octet_packets
);
7422 ESTAT_ADD(rx_1024_to_1522_octet_packets
);
7423 ESTAT_ADD(rx_1523_to_2047_octet_packets
);
7424 ESTAT_ADD(rx_2048_to_4095_octet_packets
);
7425 ESTAT_ADD(rx_4096_to_8191_octet_packets
);
7426 ESTAT_ADD(rx_8192_to_9022_octet_packets
);
/* Transmit-side counters. */
7428 ESTAT_ADD(tx_octets
);
7429 ESTAT_ADD(tx_collisions
);
7430 ESTAT_ADD(tx_xon_sent
);
7431 ESTAT_ADD(tx_xoff_sent
);
7432 ESTAT_ADD(tx_flow_control
);
7433 ESTAT_ADD(tx_mac_errors
);
7434 ESTAT_ADD(tx_single_collisions
);
7435 ESTAT_ADD(tx_mult_collisions
);
7436 ESTAT_ADD(tx_deferred
);
7437 ESTAT_ADD(tx_excessive_collisions
);
7438 ESTAT_ADD(tx_late_collisions
);
7439 ESTAT_ADD(tx_collide_2times
);
7440 ESTAT_ADD(tx_collide_3times
);
7441 ESTAT_ADD(tx_collide_4times
);
7442 ESTAT_ADD(tx_collide_5times
);
7443 ESTAT_ADD(tx_collide_6times
);
7444 ESTAT_ADD(tx_collide_7times
);
7445 ESTAT_ADD(tx_collide_8times
);
7446 ESTAT_ADD(tx_collide_9times
);
7447 ESTAT_ADD(tx_collide_10times
);
7448 ESTAT_ADD(tx_collide_11times
);
7449 ESTAT_ADD(tx_collide_12times
);
7450 ESTAT_ADD(tx_collide_13times
);
7451 ESTAT_ADD(tx_collide_14times
);
7452 ESTAT_ADD(tx_collide_15times
);
7453 ESTAT_ADD(tx_ucast_packets
);
7454 ESTAT_ADD(tx_mcast_packets
);
7455 ESTAT_ADD(tx_bcast_packets
);
7456 ESTAT_ADD(tx_carrier_sense_errors
);
7457 ESTAT_ADD(tx_discards
);
7458 ESTAT_ADD(tx_errors
);
/* Internal DMA / ring / interrupt bookkeeping counters. */
7460 ESTAT_ADD(dma_writeq_full
);
7461 ESTAT_ADD(dma_write_prioq_full
);
7462 ESTAT_ADD(rxbds_empty
);
7463 ESTAT_ADD(rx_discards
);
7464 ESTAT_ADD(rx_errors
);
7465 ESTAT_ADD(rx_threshold_hit
);
7467 ESTAT_ADD(dma_readq_full
);
7468 ESTAT_ADD(dma_read_prioq_full
);
7469 ESTAT_ADD(tx_comp_queue_full
);
7471 ESTAT_ADD(ring_set_send_prod_index
);
7472 ESTAT_ADD(ring_status_update
);
7473 ESTAT_ADD(nic_irqs
);
7474 ESTAT_ADD(nic_avoided_irqs
);
7475 ESTAT_ADD(nic_tx_threshold_hit
);
7480 static struct net_device_stats
*tg3_get_stats(struct net_device
*dev
)
7482 struct tg3
*tp
= netdev_priv(dev
);
7483 struct net_device_stats
*stats
= &tp
->net_stats
;
7484 struct net_device_stats
*old_stats
= &tp
->net_stats_prev
;
7485 struct tg3_hw_stats
*hw_stats
= tp
->hw_stats
;
7490 stats
->rx_packets
= old_stats
->rx_packets
+
7491 get_stat64(&hw_stats
->rx_ucast_packets
) +
7492 get_stat64(&hw_stats
->rx_mcast_packets
) +
7493 get_stat64(&hw_stats
->rx_bcast_packets
);
7495 stats
->tx_packets
= old_stats
->tx_packets
+
7496 get_stat64(&hw_stats
->tx_ucast_packets
) +
7497 get_stat64(&hw_stats
->tx_mcast_packets
) +
7498 get_stat64(&hw_stats
->tx_bcast_packets
);
7500 stats
->rx_bytes
= old_stats
->rx_bytes
+
7501 get_stat64(&hw_stats
->rx_octets
);
7502 stats
->tx_bytes
= old_stats
->tx_bytes
+
7503 get_stat64(&hw_stats
->tx_octets
);
7505 stats
->rx_errors
= old_stats
->rx_errors
+
7506 get_stat64(&hw_stats
->rx_errors
);
7507 stats
->tx_errors
= old_stats
->tx_errors
+
7508 get_stat64(&hw_stats
->tx_errors
) +
7509 get_stat64(&hw_stats
->tx_mac_errors
) +
7510 get_stat64(&hw_stats
->tx_carrier_sense_errors
) +
7511 get_stat64(&hw_stats
->tx_discards
);
7513 stats
->multicast
= old_stats
->multicast
+
7514 get_stat64(&hw_stats
->rx_mcast_packets
);
7515 stats
->collisions
= old_stats
->collisions
+
7516 get_stat64(&hw_stats
->tx_collisions
);
7518 stats
->rx_length_errors
= old_stats
->rx_length_errors
+
7519 get_stat64(&hw_stats
->rx_frame_too_long_errors
) +
7520 get_stat64(&hw_stats
->rx_undersize_packets
);
7522 stats
->rx_over_errors
= old_stats
->rx_over_errors
+
7523 get_stat64(&hw_stats
->rxbds_empty
);
7524 stats
->rx_frame_errors
= old_stats
->rx_frame_errors
+
7525 get_stat64(&hw_stats
->rx_align_errors
);
7526 stats
->tx_aborted_errors
= old_stats
->tx_aborted_errors
+
7527 get_stat64(&hw_stats
->tx_discards
);
7528 stats
->tx_carrier_errors
= old_stats
->tx_carrier_errors
+
7529 get_stat64(&hw_stats
->tx_carrier_sense_errors
);
7531 stats
->rx_crc_errors
= old_stats
->rx_crc_errors
+
7532 calc_crc_errors(tp
);
7534 stats
->rx_missed_errors
= old_stats
->rx_missed_errors
+
7535 get_stat64(&hw_stats
->rx_discards
);
7540 static inline u32
calc_crc(unsigned char *buf
, int len
)
7548 for (j
= 0; j
< len
; j
++) {
7551 for (k
= 0; k
< 8; k
++) {
7565 static void tg3_set_multi(struct tg3
*tp
, unsigned int accept_all
)
7567 /* accept or reject all multicast frames */
7568 tw32(MAC_HASH_REG_0
, accept_all
? 0xffffffff : 0);
7569 tw32(MAC_HASH_REG_1
, accept_all
? 0xffffffff : 0);
7570 tw32(MAC_HASH_REG_2
, accept_all
? 0xffffffff : 0);
7571 tw32(MAC_HASH_REG_3
, accept_all
? 0xffffffff : 0);
7574 static void __tg3_set_rx_mode(struct net_device
*dev
)
7576 struct tg3
*tp
= netdev_priv(dev
);
7579 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
7580 RX_MODE_KEEP_VLAN_TAG
);
7582 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7585 #if TG3_VLAN_TAG_USED
7587 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
7588 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
7590 /* By definition, VLAN is disabled always in this
7593 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
7594 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
7597 if (dev
->flags
& IFF_PROMISC
) {
7598 /* Promiscuous mode. */
7599 rx_mode
|= RX_MODE_PROMISC
;
7600 } else if (dev
->flags
& IFF_ALLMULTI
) {
7601 /* Accept all multicast. */
7602 tg3_set_multi (tp
, 1);
7603 } else if (dev
->mc_count
< 1) {
7604 /* Reject all multicast. */
7605 tg3_set_multi (tp
, 0);
7607 /* Accept one or more multicast(s). */
7608 struct dev_mc_list
*mclist
;
7610 u32 mc_filter
[4] = { 0, };
7615 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
7616 i
++, mclist
= mclist
->next
) {
7618 crc
= calc_crc (mclist
->dmi_addr
, ETH_ALEN
);
7620 regidx
= (bit
& 0x60) >> 5;
7622 mc_filter
[regidx
] |= (1 << bit
);
7625 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
7626 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
7627 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
7628 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
7631 if (rx_mode
!= tp
->rx_mode
) {
7632 tp
->rx_mode
= rx_mode
;
7633 tw32_f(MAC_RX_MODE
, rx_mode
);
/* netdev entry point: recompute RX filtering (promisc/multicast/VLAN)
 * under the full driver lock.  A stopped interface is left untouched.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7650 #define TG3_REGDUMP_LEN (32 * 1024)
7652 static int tg3_get_regs_len(struct net_device
*dev
)
7654 return TG3_REGDUMP_LEN
;
7657 static void tg3_get_regs(struct net_device
*dev
,
7658 struct ethtool_regs
*regs
, void *_p
)
7661 struct tg3
*tp
= netdev_priv(dev
);
7667 memset(p
, 0, TG3_REGDUMP_LEN
);
7669 if (tp
->link_config
.phy_is_low_power
)
7672 tg3_full_lock(tp
, 0);
7674 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
7675 #define GET_REG32_LOOP(base,len) \
7676 do { p = (u32 *)(orig_p + (base)); \
7677 for (i = 0; i < len; i += 4) \
7678 __GET_REG32((base) + i); \
7680 #define GET_REG32_1(reg) \
7681 do { p = (u32 *)(orig_p + (reg)); \
7682 __GET_REG32((reg)); \
7685 GET_REG32_LOOP(TG3PCI_VENDOR
, 0xb0);
7686 GET_REG32_LOOP(MAILBOX_INTERRUPT_0
, 0x200);
7687 GET_REG32_LOOP(MAC_MODE
, 0x4f0);
7688 GET_REG32_LOOP(SNDDATAI_MODE
, 0xe0);
7689 GET_REG32_1(SNDDATAC_MODE
);
7690 GET_REG32_LOOP(SNDBDS_MODE
, 0x80);
7691 GET_REG32_LOOP(SNDBDI_MODE
, 0x48);
7692 GET_REG32_1(SNDBDC_MODE
);
7693 GET_REG32_LOOP(RCVLPC_MODE
, 0x20);
7694 GET_REG32_LOOP(RCVLPC_SELLST_BASE
, 0x15c);
7695 GET_REG32_LOOP(RCVDBDI_MODE
, 0x0c);
7696 GET_REG32_LOOP(RCVDBDI_JUMBO_BD
, 0x3c);
7697 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0
, 0x44);
7698 GET_REG32_1(RCVDCC_MODE
);
7699 GET_REG32_LOOP(RCVBDI_MODE
, 0x20);
7700 GET_REG32_LOOP(RCVCC_MODE
, 0x14);
7701 GET_REG32_LOOP(RCVLSC_MODE
, 0x08);
7702 GET_REG32_1(MBFREE_MODE
);
7703 GET_REG32_LOOP(HOSTCC_MODE
, 0x100);
7704 GET_REG32_LOOP(MEMARB_MODE
, 0x10);
7705 GET_REG32_LOOP(BUFMGR_MODE
, 0x58);
7706 GET_REG32_LOOP(RDMAC_MODE
, 0x08);
7707 GET_REG32_LOOP(WDMAC_MODE
, 0x08);
7708 GET_REG32_1(RX_CPU_MODE
);
7709 GET_REG32_1(RX_CPU_STATE
);
7710 GET_REG32_1(RX_CPU_PGMCTR
);
7711 GET_REG32_1(RX_CPU_HWBKPT
);
7712 GET_REG32_1(TX_CPU_MODE
);
7713 GET_REG32_1(TX_CPU_STATE
);
7714 GET_REG32_1(TX_CPU_PGMCTR
);
7715 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0
, 0x110);
7716 GET_REG32_LOOP(FTQ_RESET
, 0x120);
7717 GET_REG32_LOOP(MSGINT_MODE
, 0x0c);
7718 GET_REG32_1(DMAC_MODE
);
7719 GET_REG32_LOOP(GRC_MODE
, 0x4c);
7720 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
7721 GET_REG32_LOOP(NVRAM_CMD
, 0x24);
7724 #undef GET_REG32_LOOP
7727 tg3_full_unlock(tp
);
7730 static int tg3_get_eeprom_len(struct net_device
*dev
)
7732 struct tg3
*tp
= netdev_priv(dev
);
7734 return tp
->nvram_size
;
7737 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
);
7738 static int tg3_nvram_read_swab(struct tg3
*tp
, u32 offset
, u32
*val
);
7740 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
7742 struct tg3
*tp
= netdev_priv(dev
);
7745 u32 i
, offset
, len
, val
, b_offset
, b_count
;
7747 if (tp
->link_config
.phy_is_low_power
)
7750 offset
= eeprom
->offset
;
7754 eeprom
->magic
= TG3_EEPROM_MAGIC
;
7757 /* adjustments to start on required 4 byte boundary */
7758 b_offset
= offset
& 3;
7759 b_count
= 4 - b_offset
;
7760 if (b_count
> len
) {
7761 /* i.e. offset=1 len=2 */
7764 ret
= tg3_nvram_read(tp
, offset
-b_offset
, &val
);
7767 val
= cpu_to_le32(val
);
7768 memcpy(data
, ((char*)&val
) + b_offset
, b_count
);
7771 eeprom
->len
+= b_count
;
7774 /* read bytes upto the last 4 byte boundary */
7775 pd
= &data
[eeprom
->len
];
7776 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
7777 ret
= tg3_nvram_read(tp
, offset
+ i
, &val
);
7782 val
= cpu_to_le32(val
);
7783 memcpy(pd
+ i
, &val
, 4);
7788 /* read last bytes not ending on 4 byte boundary */
7789 pd
= &data
[eeprom
->len
];
7791 b_offset
= offset
+ len
- b_count
;
7792 ret
= tg3_nvram_read(tp
, b_offset
, &val
);
7795 val
= cpu_to_le32(val
);
7796 memcpy(pd
, ((char*)&val
), b_count
);
7797 eeprom
->len
+= b_count
;
7802 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
);
7804 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
7806 struct tg3
*tp
= netdev_priv(dev
);
7808 u32 offset
, len
, b_offset
, odd_len
, start
, end
;
7811 if (tp
->link_config
.phy_is_low_power
)
7814 if (eeprom
->magic
!= TG3_EEPROM_MAGIC
)
7817 offset
= eeprom
->offset
;
7820 if ((b_offset
= (offset
& 3))) {
7821 /* adjustments to start on required 4 byte boundary */
7822 ret
= tg3_nvram_read(tp
, offset
-b_offset
, &start
);
7825 start
= cpu_to_le32(start
);
7834 /* adjustments to end on required 4 byte boundary */
7836 len
= (len
+ 3) & ~3;
7837 ret
= tg3_nvram_read(tp
, offset
+len
-4, &end
);
7840 end
= cpu_to_le32(end
);
7844 if (b_offset
|| odd_len
) {
7845 buf
= kmalloc(len
, GFP_KERNEL
);
7849 memcpy(buf
, &start
, 4);
7851 memcpy(buf
+len
-4, &end
, 4);
7852 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
7855 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
7863 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7865 struct tg3
*tp
= netdev_priv(dev
);
7867 cmd
->supported
= (SUPPORTED_Autoneg
);
7869 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
7870 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
7871 SUPPORTED_1000baseT_Full
);
7873 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)) {
7874 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
7875 SUPPORTED_100baseT_Full
|
7876 SUPPORTED_10baseT_Half
|
7877 SUPPORTED_10baseT_Full
|
7879 cmd
->port
= PORT_TP
;
7881 cmd
->supported
|= SUPPORTED_FIBRE
;
7882 cmd
->port
= PORT_FIBRE
;
7885 cmd
->advertising
= tp
->link_config
.advertising
;
7886 if (netif_running(dev
)) {
7887 cmd
->speed
= tp
->link_config
.active_speed
;
7888 cmd
->duplex
= tp
->link_config
.active_duplex
;
7890 cmd
->phy_address
= PHY_ADDR
;
7891 cmd
->transceiver
= 0;
7892 cmd
->autoneg
= tp
->link_config
.autoneg
;
7898 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7900 struct tg3
*tp
= netdev_priv(dev
);
7902 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) {
7903 /* These are the only valid advertisement bits allowed. */
7904 if (cmd
->autoneg
== AUTONEG_ENABLE
&&
7905 (cmd
->advertising
& ~(ADVERTISED_1000baseT_Half
|
7906 ADVERTISED_1000baseT_Full
|
7907 ADVERTISED_Autoneg
|
7910 /* Fiber can only do SPEED_1000. */
7911 else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
7912 (cmd
->speed
!= SPEED_1000
))
7914 /* Copper cannot force SPEED_1000. */
7915 } else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
7916 (cmd
->speed
== SPEED_1000
))
7918 else if ((cmd
->speed
== SPEED_1000
) &&
7919 (tp
->tg3_flags2
& TG3_FLAG_10_100_ONLY
))
7922 tg3_full_lock(tp
, 0);
7924 tp
->link_config
.autoneg
= cmd
->autoneg
;
7925 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
7926 tp
->link_config
.advertising
= cmd
->advertising
;
7927 tp
->link_config
.speed
= SPEED_INVALID
;
7928 tp
->link_config
.duplex
= DUPLEX_INVALID
;
7930 tp
->link_config
.advertising
= 0;
7931 tp
->link_config
.speed
= cmd
->speed
;
7932 tp
->link_config
.duplex
= cmd
->duplex
;
7935 if (netif_running(dev
))
7936 tg3_setup_phy(tp
, 1);
7938 tg3_full_unlock(tp
);
7943 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
7945 struct tg3
*tp
= netdev_priv(dev
);
7947 strcpy(info
->driver
, DRV_MODULE_NAME
);
7948 strcpy(info
->version
, DRV_MODULE_VERSION
);
7949 strcpy(info
->fw_version
, tp
->fw_ver
);
7950 strcpy(info
->bus_info
, pci_name(tp
->pdev
));
7953 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7955 struct tg3
*tp
= netdev_priv(dev
);
7957 wol
->supported
= WAKE_MAGIC
;
7959 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)
7960 wol
->wolopts
= WAKE_MAGIC
;
7961 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
7964 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7966 struct tg3
*tp
= netdev_priv(dev
);
7968 if (wol
->wolopts
& ~WAKE_MAGIC
)
7970 if ((wol
->wolopts
& WAKE_MAGIC
) &&
7971 tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
&&
7972 !(tp
->tg3_flags
& TG3_FLAG_SERDES_WOL_CAP
))
7975 spin_lock_bh(&tp
->lock
);
7976 if (wol
->wolopts
& WAKE_MAGIC
)
7977 tp
->tg3_flags
|= TG3_FLAG_WOL_ENABLE
;
7979 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
7980 spin_unlock_bh(&tp
->lock
);
7985 static u32
tg3_get_msglevel(struct net_device
*dev
)
7987 struct tg3
*tp
= netdev_priv(dev
);
7988 return tp
->msg_enable
;
7991 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
7993 struct tg3
*tp
= netdev_priv(dev
);
7994 tp
->msg_enable
= value
;
7997 #if TG3_TSO_SUPPORT != 0
7998 static int tg3_set_tso(struct net_device
*dev
, u32 value
)
8000 struct tg3
*tp
= netdev_priv(dev
);
8002 if (!(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
8007 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO_2
) &&
8008 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5906
)) {
8010 dev
->features
|= NETIF_F_TSO6
;
8012 dev
->features
&= ~NETIF_F_TSO6
;
8014 return ethtool_op_set_tso(dev
, value
);
8018 static int tg3_nway_reset(struct net_device
*dev
)
8020 struct tg3
*tp
= netdev_priv(dev
);
8024 if (!netif_running(dev
))
8027 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8030 spin_lock_bh(&tp
->lock
);
8032 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
8033 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
8034 ((bmcr
& BMCR_ANENABLE
) ||
8035 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
))) {
8036 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
8040 spin_unlock_bh(&tp
->lock
);
8045 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
8047 struct tg3
*tp
= netdev_priv(dev
);
8049 ering
->rx_max_pending
= TG3_RX_RING_SIZE
- 1;
8050 ering
->rx_mini_max_pending
= 0;
8051 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
8052 ering
->rx_jumbo_max_pending
= TG3_RX_JUMBO_RING_SIZE
- 1;
8054 ering
->rx_jumbo_max_pending
= 0;
8056 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
8058 ering
->rx_pending
= tp
->rx_pending
;
8059 ering
->rx_mini_pending
= 0;
8060 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
8061 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
8063 ering
->rx_jumbo_pending
= 0;
8065 ering
->tx_pending
= tp
->tx_pending
;
8068 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
8070 struct tg3
*tp
= netdev_priv(dev
);
8071 int irq_sync
= 0, err
= 0;
8073 if ((ering
->rx_pending
> TG3_RX_RING_SIZE
- 1) ||
8074 (ering
->rx_jumbo_pending
> TG3_RX_JUMBO_RING_SIZE
- 1) ||
8075 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1))
8078 if (netif_running(dev
)) {
8083 tg3_full_lock(tp
, irq_sync
);
8085 tp
->rx_pending
= ering
->rx_pending
;
8087 if ((tp
->tg3_flags2
& TG3_FLG2_MAX_RXPEND_64
) &&
8088 tp
->rx_pending
> 63)
8089 tp
->rx_pending
= 63;
8090 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
8091 tp
->tx_pending
= ering
->tx_pending
;
8093 if (netif_running(dev
)) {
8094 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8095 err
= tg3_restart_hw(tp
, 1);
8097 tg3_netif_start(tp
);
8100 tg3_full_unlock(tp
);
8105 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
8107 struct tg3
*tp
= netdev_priv(dev
);
8109 epause
->autoneg
= (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) != 0;
8110 epause
->rx_pause
= (tp
->tg3_flags
& TG3_FLAG_RX_PAUSE
) != 0;
8111 epause
->tx_pause
= (tp
->tg3_flags
& TG3_FLAG_TX_PAUSE
) != 0;
8114 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
8116 struct tg3
*tp
= netdev_priv(dev
);
8117 int irq_sync
= 0, err
= 0;
8119 if (netif_running(dev
)) {
8124 tg3_full_lock(tp
, irq_sync
);
8126 if (epause
->autoneg
)
8127 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
8129 tp
->tg3_flags
&= ~TG3_FLAG_PAUSE_AUTONEG
;
8130 if (epause
->rx_pause
)
8131 tp
->tg3_flags
|= TG3_FLAG_RX_PAUSE
;
8133 tp
->tg3_flags
&= ~TG3_FLAG_RX_PAUSE
;
8134 if (epause
->tx_pause
)
8135 tp
->tg3_flags
|= TG3_FLAG_TX_PAUSE
;
8137 tp
->tg3_flags
&= ~TG3_FLAG_TX_PAUSE
;
8139 if (netif_running(dev
)) {
8140 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8141 err
= tg3_restart_hw(tp
, 1);
8143 tg3_netif_start(tp
);
8146 tg3_full_unlock(tp
);
8151 static u32
tg3_get_rx_csum(struct net_device
*dev
)
8153 struct tg3
*tp
= netdev_priv(dev
);
8154 return (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0;
8157 static int tg3_set_rx_csum(struct net_device
*dev
, u32 data
)
8159 struct tg3
*tp
= netdev_priv(dev
);
8161 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
8167 spin_lock_bh(&tp
->lock
);
8169 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
8171 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
8172 spin_unlock_bh(&tp
->lock
);
8177 static int tg3_set_tx_csum(struct net_device
*dev
, u32 data
)
8179 struct tg3
*tp
= netdev_priv(dev
);
8181 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
8187 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
8188 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
8189 ethtool_op_set_tx_hw_csum(dev
, data
);
8191 ethtool_op_set_tx_csum(dev
, data
);
8196 static int tg3_get_stats_count (struct net_device
*dev
)
8198 return TG3_NUM_STATS
;
8201 static int tg3_get_test_count (struct net_device
*dev
)
8203 return TG3_NUM_TEST
;
8206 static void tg3_get_strings (struct net_device
*dev
, u32 stringset
, u8
*buf
)
8208 switch (stringset
) {
8210 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
8213 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
8216 WARN_ON(1); /* we need a WARN() */
8221 static int tg3_phys_id(struct net_device
*dev
, u32 data
)
8223 struct tg3
*tp
= netdev_priv(dev
);
8226 if (!netif_running(tp
->dev
))
8232 for (i
= 0; i
< (data
* 2); i
++) {
8234 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
8235 LED_CTRL_1000MBPS_ON
|
8236 LED_CTRL_100MBPS_ON
|
8237 LED_CTRL_10MBPS_ON
|
8238 LED_CTRL_TRAFFIC_OVERRIDE
|
8239 LED_CTRL_TRAFFIC_BLINK
|
8240 LED_CTRL_TRAFFIC_LED
);
8243 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
8244 LED_CTRL_TRAFFIC_OVERRIDE
);
8246 if (msleep_interruptible(500))
8249 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
8253 static void tg3_get_ethtool_stats (struct net_device
*dev
,
8254 struct ethtool_stats
*estats
, u64
*tmp_stats
)
8256 struct tg3
*tp
= netdev_priv(dev
);
8257 memcpy(tmp_stats
, tg3_get_estats(tp
), sizeof(tp
->estats
));
8260 #define NVRAM_TEST_SIZE 0x100
8261 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8263 static int tg3_test_nvram(struct tg3
*tp
)
8265 u32
*buf
, csum
, magic
;
8266 int i
, j
, err
= 0, size
;
8268 if (tg3_nvram_read_swab(tp
, 0, &magic
) != 0)
8271 if (magic
== TG3_EEPROM_MAGIC
)
8272 size
= NVRAM_TEST_SIZE
;
8273 else if ((magic
& 0xff000000) == 0xa5000000) {
8274 if ((magic
& 0xe00000) == 0x200000)
8275 size
= NVRAM_SELFBOOT_FORMAT1_SIZE
;
8281 buf
= kmalloc(size
, GFP_KERNEL
);
8286 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
8289 if ((err
= tg3_nvram_read(tp
, i
, &val
)) != 0)
8291 buf
[j
] = cpu_to_le32(val
);
8296 /* Selfboot format */
8297 if (cpu_to_be32(buf
[0]) != TG3_EEPROM_MAGIC
) {
8298 u8
*buf8
= (u8
*) buf
, csum8
= 0;
8300 for (i
= 0; i
< size
; i
++)
8312 /* Bootstrap checksum at offset 0x10 */
8313 csum
= calc_crc((unsigned char *) buf
, 0x10);
8314 if(csum
!= cpu_to_le32(buf
[0x10/4]))
8317 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8318 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
8319 if (csum
!= cpu_to_le32(buf
[0xfc/4]))
8329 #define TG3_SERDES_TIMEOUT_SEC 2
8330 #define TG3_COPPER_TIMEOUT_SEC 6
8332 static int tg3_test_link(struct tg3
*tp
)
8336 if (!netif_running(tp
->dev
))
8339 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
8340 max
= TG3_SERDES_TIMEOUT_SEC
;
8342 max
= TG3_COPPER_TIMEOUT_SEC
;
8344 for (i
= 0; i
< max
; i
++) {
8345 if (netif_carrier_ok(tp
->dev
))
8348 if (msleep_interruptible(1000))
8355 /* Only test the commonly used registers */
8356 static int tg3_test_registers(struct tg3
*tp
)
8359 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
8363 #define TG3_FL_5705 0x1
8364 #define TG3_FL_NOT_5705 0x2
8365 #define TG3_FL_NOT_5788 0x4
8369 /* MAC Control Registers */
8370 { MAC_MODE
, TG3_FL_NOT_5705
,
8371 0x00000000, 0x00ef6f8c },
8372 { MAC_MODE
, TG3_FL_5705
,
8373 0x00000000, 0x01ef6b8c },
8374 { MAC_STATUS
, TG3_FL_NOT_5705
,
8375 0x03800107, 0x00000000 },
8376 { MAC_STATUS
, TG3_FL_5705
,
8377 0x03800100, 0x00000000 },
8378 { MAC_ADDR_0_HIGH
, 0x0000,
8379 0x00000000, 0x0000ffff },
8380 { MAC_ADDR_0_LOW
, 0x0000,
8381 0x00000000, 0xffffffff },
8382 { MAC_RX_MTU_SIZE
, 0x0000,
8383 0x00000000, 0x0000ffff },
8384 { MAC_TX_MODE
, 0x0000,
8385 0x00000000, 0x00000070 },
8386 { MAC_TX_LENGTHS
, 0x0000,
8387 0x00000000, 0x00003fff },
8388 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
8389 0x00000000, 0x000007fc },
8390 { MAC_RX_MODE
, TG3_FL_5705
,
8391 0x00000000, 0x000007dc },
8392 { MAC_HASH_REG_0
, 0x0000,
8393 0x00000000, 0xffffffff },
8394 { MAC_HASH_REG_1
, 0x0000,
8395 0x00000000, 0xffffffff },
8396 { MAC_HASH_REG_2
, 0x0000,
8397 0x00000000, 0xffffffff },
8398 { MAC_HASH_REG_3
, 0x0000,
8399 0x00000000, 0xffffffff },
8401 /* Receive Data and Receive BD Initiator Control Registers. */
8402 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
8403 0x00000000, 0xffffffff },
8404 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
8405 0x00000000, 0xffffffff },
8406 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
8407 0x00000000, 0x00000003 },
8408 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
8409 0x00000000, 0xffffffff },
8410 { RCVDBDI_STD_BD
+0, 0x0000,
8411 0x00000000, 0xffffffff },
8412 { RCVDBDI_STD_BD
+4, 0x0000,
8413 0x00000000, 0xffffffff },
8414 { RCVDBDI_STD_BD
+8, 0x0000,
8415 0x00000000, 0xffff0002 },
8416 { RCVDBDI_STD_BD
+0xc, 0x0000,
8417 0x00000000, 0xffffffff },
8419 /* Receive BD Initiator Control Registers. */
8420 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
8421 0x00000000, 0xffffffff },
8422 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
8423 0x00000000, 0x000003ff },
8424 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
8425 0x00000000, 0xffffffff },
8427 /* Host Coalescing Control Registers. */
8428 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
8429 0x00000000, 0x00000004 },
8430 { HOSTCC_MODE
, TG3_FL_5705
,
8431 0x00000000, 0x000000f6 },
8432 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
8433 0x00000000, 0xffffffff },
8434 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
8435 0x00000000, 0x000003ff },
8436 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
8437 0x00000000, 0xffffffff },
8438 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
8439 0x00000000, 0x000003ff },
8440 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
8441 0x00000000, 0xffffffff },
8442 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
8443 0x00000000, 0x000000ff },
8444 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
8445 0x00000000, 0xffffffff },
8446 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
8447 0x00000000, 0x000000ff },
8448 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
8449 0x00000000, 0xffffffff },
8450 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
8451 0x00000000, 0xffffffff },
8452 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
8453 0x00000000, 0xffffffff },
8454 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
8455 0x00000000, 0x000000ff },
8456 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
8457 0x00000000, 0xffffffff },
8458 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
8459 0x00000000, 0x000000ff },
8460 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
8461 0x00000000, 0xffffffff },
8462 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
8463 0x00000000, 0xffffffff },
8464 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
8465 0x00000000, 0xffffffff },
8466 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
8467 0x00000000, 0xffffffff },
8468 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
8469 0x00000000, 0xffffffff },
8470 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
8471 0xffffffff, 0x00000000 },
8472 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
8473 0xffffffff, 0x00000000 },
8475 /* Buffer Manager Control Registers. */
8476 { BUFMGR_MB_POOL_ADDR
, 0x0000,
8477 0x00000000, 0x007fff80 },
8478 { BUFMGR_MB_POOL_SIZE
, 0x0000,
8479 0x00000000, 0x007fffff },
8480 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
8481 0x00000000, 0x0000003f },
8482 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
8483 0x00000000, 0x000001ff },
8484 { BUFMGR_MB_HIGH_WATER
, 0x0000,
8485 0x00000000, 0x000001ff },
8486 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
8487 0xffffffff, 0x00000000 },
8488 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
8489 0xffffffff, 0x00000000 },
8491 /* Mailbox Registers */
8492 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
8493 0x00000000, 0x000001ff },
8494 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
8495 0x00000000, 0x000001ff },
8496 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
8497 0x00000000, 0x000007ff },
8498 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
8499 0x00000000, 0x000001ff },
8501 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8504 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
8509 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
8510 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
8513 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
8516 if ((tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
8517 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
8520 offset
= (u32
) reg_tbl
[i
].offset
;
8521 read_mask
= reg_tbl
[i
].read_mask
;
8522 write_mask
= reg_tbl
[i
].write_mask
;
8524 /* Save the original register content */
8525 save_val
= tr32(offset
);
8527 /* Determine the read-only value. */
8528 read_val
= save_val
& read_mask
;
8530 /* Write zero to the register, then make sure the read-only bits
8531 * are not changed and the read/write bits are all zeros.
8537 /* Test the read-only and read/write bits. */
8538 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
8541 /* Write ones to all the bits defined by RdMask and WrMask, then
8542 * make sure the read-only bits are not changed and the
8543 * read/write bits are all ones.
8545 tw32(offset
, read_mask
| write_mask
);
8549 /* Test the read-only bits. */
8550 if ((val
& read_mask
) != read_val
)
8553 /* Test the read/write bits. */
8554 if ((val
& write_mask
) != write_mask
)
8557 tw32(offset
, save_val
);
8563 printk(KERN_ERR PFX
"Register test failed at offset %x\n", offset
);
8564 tw32(offset
, save_val
);
8568 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
8570 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8574 for (i
= 0; i
< sizeof(test_pattern
)/sizeof(u32
); i
++) {
8575 for (j
= 0; j
< len
; j
+= 4) {
8578 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
8579 tg3_read_mem(tp
, offset
+ j
, &val
);
8580 if (val
!= test_pattern
[i
])
8587 static int tg3_test_memory(struct tg3
*tp
)
8589 static struct mem_entry
{
8592 } mem_tbl_570x
[] = {
8593 { 0x00000000, 0x00b50},
8594 { 0x00002000, 0x1c000},
8595 { 0xffffffff, 0x00000}
8596 }, mem_tbl_5705
[] = {
8597 { 0x00000100, 0x0000c},
8598 { 0x00000200, 0x00008},
8599 { 0x00004000, 0x00800},
8600 { 0x00006000, 0x01000},
8601 { 0x00008000, 0x02000},
8602 { 0x00010000, 0x0e000},
8603 { 0xffffffff, 0x00000}
8604 }, mem_tbl_5755
[] = {
8605 { 0x00000200, 0x00008},
8606 { 0x00004000, 0x00800},
8607 { 0x00006000, 0x00800},
8608 { 0x00008000, 0x02000},
8609 { 0x00010000, 0x0c000},
8610 { 0xffffffff, 0x00000}
8612 struct mem_entry
*mem_tbl
;
8616 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
8617 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
8618 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
8619 mem_tbl
= mem_tbl_5755
;
8621 mem_tbl
= mem_tbl_5705
;
8623 mem_tbl
= mem_tbl_570x
;
8625 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
8626 if ((err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
,
8627 mem_tbl
[i
].len
)) != 0)
8634 #define TG3_MAC_LOOPBACK 0
8635 #define TG3_PHY_LOOPBACK 1
8637 static int tg3_run_loopback(struct tg3
*tp
, int loopback_mode
)
8639 u32 mac_mode
, rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
8641 struct sk_buff
*skb
, *rx_skb
;
8644 int num_pkts
, tx_len
, rx_len
, i
, err
;
8645 struct tg3_rx_buffer_desc
*desc
;
8647 if (loopback_mode
== TG3_MAC_LOOPBACK
) {
8648 /* HW errata - mac loopback fails in some cases on 5780.
8649 * Normal traffic and PHY loopback are not affected by
8652 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
)
8655 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
8656 MAC_MODE_PORT_INT_LPBACK
| MAC_MODE_LINK_POLARITY
;
8657 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
8658 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
8660 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
8661 tw32(MAC_MODE
, mac_mode
);
8662 } else if (loopback_mode
== TG3_PHY_LOOPBACK
) {
8665 val
= BMCR_LOOPBACK
| BMCR_FULLDPLX
;
8666 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
8667 val
|= BMCR_SPEED100
;
8669 val
|= BMCR_SPEED1000
;
8671 tg3_writephy(tp
, MII_BMCR
, val
);
8673 /* reset to prevent losing 1st rx packet intermittently */
8674 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
8675 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8677 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8679 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
8680 MAC_MODE_LINK_POLARITY
;
8681 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
8682 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
8684 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
8685 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
8686 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
8687 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
8688 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
8690 tw32(MAC_MODE
, mac_mode
);
8698 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
8702 tx_data
= skb_put(skb
, tx_len
);
8703 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
8704 memset(tx_data
+ 6, 0x0, 8);
8706 tw32(MAC_RX_MTU_SIZE
, tx_len
+ 4);
8708 for (i
= 14; i
< tx_len
; i
++)
8709 tx_data
[i
] = (u8
) (i
& 0xff);
8711 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
8713 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
8718 rx_start_idx
= tp
->hw_status
->idx
[0].rx_producer
;
8722 tg3_set_txd(tp
, tp
->tx_prod
, map
, tx_len
, 0, 1);
8727 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
,
8729 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
);
8733 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
8734 for (i
= 0; i
< 25; i
++) {
8735 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
8740 tx_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
8741 rx_idx
= tp
->hw_status
->idx
[0].rx_producer
;
8742 if ((tx_idx
== tp
->tx_prod
) &&
8743 (rx_idx
== (rx_start_idx
+ num_pkts
)))
8747 pci_unmap_single(tp
->pdev
, map
, tx_len
, PCI_DMA_TODEVICE
);
8750 if (tx_idx
!= tp
->tx_prod
)
8753 if (rx_idx
!= rx_start_idx
+ num_pkts
)
8756 desc
= &tp
->rx_rcb
[rx_start_idx
];
8757 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
8758 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
8759 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
8762 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
8763 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
8766 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4;
8767 if (rx_len
!= tx_len
)
8770 rx_skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
8772 map
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
], mapping
);
8773 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
, PCI_DMA_FROMDEVICE
);
8775 for (i
= 14; i
< tx_len
; i
++) {
8776 if (*(rx_skb
->data
+ i
) != (u8
) (i
& 0xff))
8781 /* tg3_free_rings will unmap and free the rx_skb */
8786 #define TG3_MAC_LOOPBACK_FAILED 1
8787 #define TG3_PHY_LOOPBACK_FAILED 2
8788 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8789 TG3_PHY_LOOPBACK_FAILED)
8791 static int tg3_test_loopback(struct tg3
*tp
)
8795 if (!netif_running(tp
->dev
))
8796 return TG3_LOOPBACK_FAILED
;
8798 err
= tg3_reset_hw(tp
, 1);
8800 return TG3_LOOPBACK_FAILED
;
8802 if (tg3_run_loopback(tp
, TG3_MAC_LOOPBACK
))
8803 err
|= TG3_MAC_LOOPBACK_FAILED
;
8804 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
8805 if (tg3_run_loopback(tp
, TG3_PHY_LOOPBACK
))
8806 err
|= TG3_PHY_LOOPBACK_FAILED
;
8812 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
8815 struct tg3
*tp
= netdev_priv(dev
);
8817 if (tp
->link_config
.phy_is_low_power
)
8818 tg3_set_power_state(tp
, PCI_D0
);
8820 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
8822 if (tg3_test_nvram(tp
) != 0) {
8823 etest
->flags
|= ETH_TEST_FL_FAILED
;
8826 if (tg3_test_link(tp
) != 0) {
8827 etest
->flags
|= ETH_TEST_FL_FAILED
;
8830 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
8831 int err
, irq_sync
= 0;
8833 if (netif_running(dev
)) {
8838 tg3_full_lock(tp
, irq_sync
);
8840 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
8841 err
= tg3_nvram_lock(tp
);
8842 tg3_halt_cpu(tp
, RX_CPU_BASE
);
8843 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
8844 tg3_halt_cpu(tp
, TX_CPU_BASE
);
8846 tg3_nvram_unlock(tp
);
8848 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
8851 if (tg3_test_registers(tp
) != 0) {
8852 etest
->flags
|= ETH_TEST_FL_FAILED
;
8855 if (tg3_test_memory(tp
) != 0) {
8856 etest
->flags
|= ETH_TEST_FL_FAILED
;
8859 if ((data
[4] = tg3_test_loopback(tp
)) != 0)
8860 etest
->flags
|= ETH_TEST_FL_FAILED
;
8862 tg3_full_unlock(tp
);
8864 if (tg3_test_interrupt(tp
) != 0) {
8865 etest
->flags
|= ETH_TEST_FL_FAILED
;
8869 tg3_full_lock(tp
, 0);
8871 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8872 if (netif_running(dev
)) {
8873 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
8874 if (!tg3_restart_hw(tp
, 1))
8875 tg3_netif_start(tp
);
8878 tg3_full_unlock(tp
);
8880 if (tp
->link_config
.phy_is_low_power
)
8881 tg3_set_power_state(tp
, PCI_D3hot
);
8885 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
8887 struct mii_ioctl_data
*data
= if_mii(ifr
);
8888 struct tg3
*tp
= netdev_priv(dev
);
8893 data
->phy_id
= PHY_ADDR
;
8899 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8900 break; /* We have no PHY */
8902 if (tp
->link_config
.phy_is_low_power
)
8905 spin_lock_bh(&tp
->lock
);
8906 err
= tg3_readphy(tp
, data
->reg_num
& 0x1f, &mii_regval
);
8907 spin_unlock_bh(&tp
->lock
);
8909 data
->val_out
= mii_regval
;
8915 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8916 break; /* We have no PHY */
8918 if (!capable(CAP_NET_ADMIN
))
8921 if (tp
->link_config
.phy_is_low_power
)
8924 spin_lock_bh(&tp
->lock
);
8925 err
= tg3_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
8926 spin_unlock_bh(&tp
->lock
);
8937 #if TG3_VLAN_TAG_USED
8938 static void tg3_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*grp
)
8940 struct tg3
*tp
= netdev_priv(dev
);
8942 if (netif_running(dev
))
8945 tg3_full_lock(tp
, 0);
8949 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8950 __tg3_set_rx_mode(dev
);
8952 tg3_full_unlock(tp
);
8954 if (netif_running(dev
))
8955 tg3_netif_start(tp
);
8958 static void tg3_vlan_rx_kill_vid(struct net_device
*dev
, unsigned short vid
)
8960 struct tg3
*tp
= netdev_priv(dev
);
8962 if (netif_running(dev
))
8965 tg3_full_lock(tp
, 0);
8967 tp
->vlgrp
->vlan_devices
[vid
] = NULL
;
8968 tg3_full_unlock(tp
);
8970 if (netif_running(dev
))
8971 tg3_netif_start(tp
);
8975 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
8977 struct tg3
*tp
= netdev_priv(dev
);
8979 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
8983 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
8985 struct tg3
*tp
= netdev_priv(dev
);
8986 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
8987 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
8989 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
8990 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
8991 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
8992 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
8993 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
8996 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
8997 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
8998 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
8999 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
9000 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
9001 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
9002 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
9003 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
9004 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
9005 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
9008 /* No rx interrupts will be generated if both are zero */
9009 if ((ec
->rx_coalesce_usecs
== 0) &&
9010 (ec
->rx_max_coalesced_frames
== 0))
9013 /* No tx interrupts will be generated if both are zero */
9014 if ((ec
->tx_coalesce_usecs
== 0) &&
9015 (ec
->tx_max_coalesced_frames
== 0))
9018 /* Only copy relevant parameters, ignore all others. */
9019 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
9020 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
9021 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
9022 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
9023 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
9024 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
9025 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
9026 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
9027 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
9029 if (netif_running(dev
)) {
9030 tg3_full_lock(tp
, 0);
9031 __tg3_set_coalesce(tp
, &tp
->coal
);
9032 tg3_full_unlock(tp
);
9037 static const struct ethtool_ops tg3_ethtool_ops
= {
9038 .get_settings
= tg3_get_settings
,
9039 .set_settings
= tg3_set_settings
,
9040 .get_drvinfo
= tg3_get_drvinfo
,
9041 .get_regs_len
= tg3_get_regs_len
,
9042 .get_regs
= tg3_get_regs
,
9043 .get_wol
= tg3_get_wol
,
9044 .set_wol
= tg3_set_wol
,
9045 .get_msglevel
= tg3_get_msglevel
,
9046 .set_msglevel
= tg3_set_msglevel
,
9047 .nway_reset
= tg3_nway_reset
,
9048 .get_link
= ethtool_op_get_link
,
9049 .get_eeprom_len
= tg3_get_eeprom_len
,
9050 .get_eeprom
= tg3_get_eeprom
,
9051 .set_eeprom
= tg3_set_eeprom
,
9052 .get_ringparam
= tg3_get_ringparam
,
9053 .set_ringparam
= tg3_set_ringparam
,
9054 .get_pauseparam
= tg3_get_pauseparam
,
9055 .set_pauseparam
= tg3_set_pauseparam
,
9056 .get_rx_csum
= tg3_get_rx_csum
,
9057 .set_rx_csum
= tg3_set_rx_csum
,
9058 .get_tx_csum
= ethtool_op_get_tx_csum
,
9059 .set_tx_csum
= tg3_set_tx_csum
,
9060 .get_sg
= ethtool_op_get_sg
,
9061 .set_sg
= ethtool_op_set_sg
,
9062 #if TG3_TSO_SUPPORT != 0
9063 .get_tso
= ethtool_op_get_tso
,
9064 .set_tso
= tg3_set_tso
,
9066 .self_test_count
= tg3_get_test_count
,
9067 .self_test
= tg3_self_test
,
9068 .get_strings
= tg3_get_strings
,
9069 .phys_id
= tg3_phys_id
,
9070 .get_stats_count
= tg3_get_stats_count
,
9071 .get_ethtool_stats
= tg3_get_ethtool_stats
,
9072 .get_coalesce
= tg3_get_coalesce
,
9073 .set_coalesce
= tg3_set_coalesce
,
9074 .get_perm_addr
= ethtool_op_get_perm_addr
,
9077 static void __devinit
tg3_get_eeprom_size(struct tg3
*tp
)
9079 u32 cursize
, val
, magic
;
9081 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
9083 if (tg3_nvram_read_swab(tp
, 0, &magic
) != 0)
9086 if ((magic
!= TG3_EEPROM_MAGIC
) && ((magic
& 0xff000000) != 0xa5000000))
9090 * Size the chip by reading offsets at increasing powers of two.
9091 * When we encounter our validation signature, we know the addressing
9092 * has wrapped around, and thus have our chip size.
9096 while (cursize
< tp
->nvram_size
) {
9097 if (tg3_nvram_read_swab(tp
, cursize
, &val
) != 0)
9106 tp
->nvram_size
= cursize
;
9109 static void __devinit
tg3_get_nvram_size(struct tg3
*tp
)
9113 if (tg3_nvram_read_swab(tp
, 0, &val
) != 0)
9116 /* Selfboot format */
9117 if (val
!= TG3_EEPROM_MAGIC
) {
9118 tg3_get_eeprom_size(tp
);
9122 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
9124 tp
->nvram_size
= (val
>> 16) * 1024;
9128 tp
->nvram_size
= 0x20000;
9131 static void __devinit
tg3_get_nvram_info(struct tg3
*tp
)
9135 nvcfg1
= tr32(NVRAM_CFG1
);
9136 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
9137 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9140 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9141 tw32(NVRAM_CFG1
, nvcfg1
);
9144 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) ||
9145 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
9146 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
9147 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
9148 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9149 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
9150 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9152 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
9153 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9154 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
9156 case FLASH_VENDOR_ATMEL_EEPROM
:
9157 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9158 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9159 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9161 case FLASH_VENDOR_ST
:
9162 tp
->nvram_jedecnum
= JEDEC_ST
;
9163 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
9164 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9166 case FLASH_VENDOR_SAIFUN
:
9167 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
9168 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
9170 case FLASH_VENDOR_SST_SMALL
:
9171 case FLASH_VENDOR_SST_LARGE
:
9172 tp
->nvram_jedecnum
= JEDEC_SST
;
9173 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
9178 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9179 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
9180 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9184 static void __devinit
tg3_get_5752_nvram_info(struct tg3
*tp
)
9188 nvcfg1
= tr32(NVRAM_CFG1
);
9190 /* NVRAM protection for TPM */
9191 if (nvcfg1
& (1 << 27))
9192 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
9194 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
9195 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
9196 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
9197 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9198 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9200 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
9201 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9202 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9203 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9205 case FLASH_5752VENDOR_ST_M45PE10
:
9206 case FLASH_5752VENDOR_ST_M45PE20
:
9207 case FLASH_5752VENDOR_ST_M45PE40
:
9208 tp
->nvram_jedecnum
= JEDEC_ST
;
9209 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9210 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9214 if (tp
->tg3_flags2
& TG3_FLG2_FLASH
) {
9215 switch (nvcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
9216 case FLASH_5752PAGE_SIZE_256
:
9217 tp
->nvram_pagesize
= 256;
9219 case FLASH_5752PAGE_SIZE_512
:
9220 tp
->nvram_pagesize
= 512;
9222 case FLASH_5752PAGE_SIZE_1K
:
9223 tp
->nvram_pagesize
= 1024;
9225 case FLASH_5752PAGE_SIZE_2K
:
9226 tp
->nvram_pagesize
= 2048;
9228 case FLASH_5752PAGE_SIZE_4K
:
9229 tp
->nvram_pagesize
= 4096;
9231 case FLASH_5752PAGE_SIZE_264
:
9232 tp
->nvram_pagesize
= 264;
9237 /* For eeprom, set pagesize to maximum eeprom size */
9238 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9240 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9241 tw32(NVRAM_CFG1
, nvcfg1
);
9245 static void __devinit
tg3_get_5755_nvram_info(struct tg3
*tp
)
9249 nvcfg1
= tr32(NVRAM_CFG1
);
9251 /* NVRAM protection for TPM */
9252 if (nvcfg1
& (1 << 27))
9253 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
9255 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
9256 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ
:
9257 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ
:
9258 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9259 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9260 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9262 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9263 tw32(NVRAM_CFG1
, nvcfg1
);
9265 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
9266 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
9267 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
9268 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
9269 case FLASH_5755VENDOR_ATMEL_FLASH_4
:
9270 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9271 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9272 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9273 tp
->nvram_pagesize
= 264;
9275 case FLASH_5752VENDOR_ST_M45PE10
:
9276 case FLASH_5752VENDOR_ST_M45PE20
:
9277 case FLASH_5752VENDOR_ST_M45PE40
:
9278 tp
->nvram_jedecnum
= JEDEC_ST
;
9279 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9280 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9281 tp
->nvram_pagesize
= 256;
9286 static void __devinit
tg3_get_5787_nvram_info(struct tg3
*tp
)
9290 nvcfg1
= tr32(NVRAM_CFG1
);
9292 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
9293 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
9294 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
9295 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
9296 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
9297 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9298 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9299 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9301 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9302 tw32(NVRAM_CFG1
, nvcfg1
);
9304 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
9305 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
9306 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
9307 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
9308 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9309 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9310 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9311 tp
->nvram_pagesize
= 264;
9313 case FLASH_5752VENDOR_ST_M45PE10
:
9314 case FLASH_5752VENDOR_ST_M45PE20
:
9315 case FLASH_5752VENDOR_ST_M45PE40
:
9316 tp
->nvram_jedecnum
= JEDEC_ST
;
9317 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9318 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9319 tp
->nvram_pagesize
= 256;
9324 static void __devinit
tg3_get_5906_nvram_info(struct tg3
*tp
)
9326 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9327 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9328 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9331 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9332 static void __devinit
tg3_nvram_init(struct tg3
*tp
)
9336 tw32_f(GRC_EEPROM_ADDR
,
9337 (EEPROM_ADDR_FSM_RESET
|
9338 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
9339 EEPROM_ADDR_CLKPERD_SHIFT
)));
9341 /* XXX schedule_timeout() ... */
9342 for (j
= 0; j
< 100; j
++)
9345 /* Enable seeprom accesses. */
9346 tw32_f(GRC_LOCAL_CTRL
,
9347 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
9350 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
9351 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
9352 tp
->tg3_flags
|= TG3_FLAG_NVRAM
;
9354 if (tg3_nvram_lock(tp
)) {
9355 printk(KERN_WARNING PFX
"%s: Cannot get nvarm lock, "
9356 "tg3_nvram_init failed.\n", tp
->dev
->name
);
9359 tg3_enable_nvram_access(tp
);
9361 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
9362 tg3_get_5752_nvram_info(tp
);
9363 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
9364 tg3_get_5755_nvram_info(tp
);
9365 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
9366 tg3_get_5787_nvram_info(tp
);
9367 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
9368 tg3_get_5906_nvram_info(tp
);
9370 tg3_get_nvram_info(tp
);
9372 tg3_get_nvram_size(tp
);
9374 tg3_disable_nvram_access(tp
);
9375 tg3_nvram_unlock(tp
);
9378 tp
->tg3_flags
&= ~(TG3_FLAG_NVRAM
| TG3_FLAG_NVRAM_BUFFERED
);
9380 tg3_get_eeprom_size(tp
);
9384 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
9385 u32 offset
, u32
*val
)
9390 if (offset
> EEPROM_ADDR_ADDR_MASK
||
9394 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
9395 EEPROM_ADDR_DEVID_MASK
|
9397 tw32(GRC_EEPROM_ADDR
,
9399 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
9400 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
9401 EEPROM_ADDR_ADDR_MASK
) |
9402 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
9404 for (i
= 0; i
< 10000; i
++) {
9405 tmp
= tr32(GRC_EEPROM_ADDR
);
9407 if (tmp
& EEPROM_ADDR_COMPLETE
)
9411 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
9414 *val
= tr32(GRC_EEPROM_DATA
);
9418 #define NVRAM_CMD_TIMEOUT 10000
9420 static int tg3_nvram_exec_cmd(struct tg3
*tp
, u32 nvram_cmd
)
9424 tw32(NVRAM_CMD
, nvram_cmd
);
9425 for (i
= 0; i
< NVRAM_CMD_TIMEOUT
; i
++) {
9427 if (tr32(NVRAM_CMD
) & NVRAM_CMD_DONE
) {
9432 if (i
== NVRAM_CMD_TIMEOUT
) {
9438 static u32
tg3_nvram_phys_addr(struct tg3
*tp
, u32 addr
)
9440 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM
) &&
9441 (tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) &&
9442 (tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
9443 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
9445 addr
= ((addr
/ tp
->nvram_pagesize
) <<
9446 ATMEL_AT45DB0X1B_PAGE_POS
) +
9447 (addr
% tp
->nvram_pagesize
);
9452 static u32
tg3_nvram_logical_addr(struct tg3
*tp
, u32 addr
)
9454 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM
) &&
9455 (tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) &&
9456 (tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
9457 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
9459 addr
= ((addr
>> ATMEL_AT45DB0X1B_PAGE_POS
) *
9460 tp
->nvram_pagesize
) +
9461 (addr
& ((1 << ATMEL_AT45DB0X1B_PAGE_POS
) - 1));
9466 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
)
9470 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
))
9471 return tg3_nvram_read_using_eeprom(tp
, offset
, val
);
9473 offset
= tg3_nvram_phys_addr(tp
, offset
);
9475 if (offset
> NVRAM_ADDR_MSK
)
9478 ret
= tg3_nvram_lock(tp
);
9482 tg3_enable_nvram_access(tp
);
9484 tw32(NVRAM_ADDR
, offset
);
9485 ret
= tg3_nvram_exec_cmd(tp
, NVRAM_CMD_RD
| NVRAM_CMD_GO
|
9486 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_DONE
);
9489 *val
= swab32(tr32(NVRAM_RDDATA
));
9491 tg3_disable_nvram_access(tp
);
9493 tg3_nvram_unlock(tp
);
9498 static int tg3_nvram_read_swab(struct tg3
*tp
, u32 offset
, u32
*val
)
9503 err
= tg3_nvram_read(tp
, offset
, &tmp
);
9508 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
9509 u32 offset
, u32 len
, u8
*buf
)
9514 for (i
= 0; i
< len
; i
+= 4) {
9519 memcpy(&data
, buf
+ i
, 4);
9521 tw32(GRC_EEPROM_DATA
, cpu_to_le32(data
));
9523 val
= tr32(GRC_EEPROM_ADDR
);
9524 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
9526 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
9528 tw32(GRC_EEPROM_ADDR
, val
|
9529 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
9530 (addr
& EEPROM_ADDR_ADDR_MASK
) |
9534 for (j
= 0; j
< 10000; j
++) {
9535 val
= tr32(GRC_EEPROM_ADDR
);
9537 if (val
& EEPROM_ADDR_COMPLETE
)
9541 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
9550 /* offset and length are dword aligned */
9551 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
9555 u32 pagesize
= tp
->nvram_pagesize
;
9556 u32 pagemask
= pagesize
- 1;
9560 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
9566 u32 phy_addr
, page_off
, size
;
9568 phy_addr
= offset
& ~pagemask
;
9570 for (j
= 0; j
< pagesize
; j
+= 4) {
9571 if ((ret
= tg3_nvram_read(tp
, phy_addr
+ j
,
9572 (u32
*) (tmp
+ j
))))
9578 page_off
= offset
& pagemask
;
9585 memcpy(tmp
+ page_off
, buf
, size
);
9587 offset
= offset
+ (pagesize
- page_off
);
9589 tg3_enable_nvram_access(tp
);
9592 * Before we can erase the flash page, we need
9593 * to issue a special "write enable" command.
9595 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
9597 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
9600 /* Erase the target page */
9601 tw32(NVRAM_ADDR
, phy_addr
);
9603 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
9604 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
9606 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
9609 /* Issue another write enable to start the write. */
9610 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
9612 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
9615 for (j
= 0; j
< pagesize
; j
+= 4) {
9618 data
= *((u32
*) (tmp
+ j
));
9619 tw32(NVRAM_WRDATA
, cpu_to_be32(data
));
9621 tw32(NVRAM_ADDR
, phy_addr
+ j
);
9623 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
9627 nvram_cmd
|= NVRAM_CMD_FIRST
;
9628 else if (j
== (pagesize
- 4))
9629 nvram_cmd
|= NVRAM_CMD_LAST
;
9631 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
9638 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
9639 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
9646 /* offset and length are dword aligned */
9647 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
9652 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
9653 u32 data
, page_off
, phy_addr
, nvram_cmd
;
9655 memcpy(&data
, buf
+ i
, 4);
9656 tw32(NVRAM_WRDATA
, cpu_to_be32(data
));
9658 page_off
= offset
% tp
->nvram_pagesize
;
9660 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
9662 tw32(NVRAM_ADDR
, phy_addr
);
9664 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
9666 if ((page_off
== 0) || (i
== 0))
9667 nvram_cmd
|= NVRAM_CMD_FIRST
;
9668 if (page_off
== (tp
->nvram_pagesize
- 4))
9669 nvram_cmd
|= NVRAM_CMD_LAST
;
9672 nvram_cmd
|= NVRAM_CMD_LAST
;
9674 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
) &&
9675 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5755
) &&
9676 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5787
) &&
9677 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
9678 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
9680 if ((ret
= tg3_nvram_exec_cmd(tp
,
9681 NVRAM_CMD_WREN
| NVRAM_CMD_GO
|
9686 if (!(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
9687 /* We always do complete word writes to eeprom. */
9688 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
9691 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
9697 /* offset and length are dword aligned */
9698 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
9702 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
9703 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
9704 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
9708 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
)) {
9709 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
9714 ret
= tg3_nvram_lock(tp
);
9718 tg3_enable_nvram_access(tp
);
9719 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
9720 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
))
9721 tw32(NVRAM_WRITE1
, 0x406);
9723 grc_mode
= tr32(GRC_MODE
);
9724 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
9726 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) ||
9727 !(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
9729 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
9733 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
9737 grc_mode
= tr32(GRC_MODE
);
9738 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
9740 tg3_disable_nvram_access(tp
);
9741 tg3_nvram_unlock(tp
);
9744 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
9745 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9752 struct subsys_tbl_ent
{
9753 u16 subsys_vendor
, subsys_devid
;
9757 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
9758 /* Broadcom boards. */
9759 { PCI_VENDOR_ID_BROADCOM
, 0x1644, PHY_ID_BCM5401
}, /* BCM95700A6 */
9760 { PCI_VENDOR_ID_BROADCOM
, 0x0001, PHY_ID_BCM5701
}, /* BCM95701A5 */
9761 { PCI_VENDOR_ID_BROADCOM
, 0x0002, PHY_ID_BCM8002
}, /* BCM95700T6 */
9762 { PCI_VENDOR_ID_BROADCOM
, 0x0003, 0 }, /* BCM95700A9 */
9763 { PCI_VENDOR_ID_BROADCOM
, 0x0005, PHY_ID_BCM5701
}, /* BCM95701T1 */
9764 { PCI_VENDOR_ID_BROADCOM
, 0x0006, PHY_ID_BCM5701
}, /* BCM95701T8 */
9765 { PCI_VENDOR_ID_BROADCOM
, 0x0007, 0 }, /* BCM95701A7 */
9766 { PCI_VENDOR_ID_BROADCOM
, 0x0008, PHY_ID_BCM5701
}, /* BCM95701A10 */
9767 { PCI_VENDOR_ID_BROADCOM
, 0x8008, PHY_ID_BCM5701
}, /* BCM95701A12 */
9768 { PCI_VENDOR_ID_BROADCOM
, 0x0009, PHY_ID_BCM5703
}, /* BCM95703Ax1 */
9769 { PCI_VENDOR_ID_BROADCOM
, 0x8009, PHY_ID_BCM5703
}, /* BCM95703Ax2 */
9772 { PCI_VENDOR_ID_3COM
, 0x1000, PHY_ID_BCM5401
}, /* 3C996T */
9773 { PCI_VENDOR_ID_3COM
, 0x1006, PHY_ID_BCM5701
}, /* 3C996BT */
9774 { PCI_VENDOR_ID_3COM
, 0x1004, 0 }, /* 3C996SX */
9775 { PCI_VENDOR_ID_3COM
, 0x1007, PHY_ID_BCM5701
}, /* 3C1000T */
9776 { PCI_VENDOR_ID_3COM
, 0x1008, PHY_ID_BCM5701
}, /* 3C940BR01 */
9779 { PCI_VENDOR_ID_DELL
, 0x00d1, PHY_ID_BCM5401
}, /* VIPER */
9780 { PCI_VENDOR_ID_DELL
, 0x0106, PHY_ID_BCM5401
}, /* JAGUAR */
9781 { PCI_VENDOR_ID_DELL
, 0x0109, PHY_ID_BCM5411
}, /* MERLOT */
9782 { PCI_VENDOR_ID_DELL
, 0x010a, PHY_ID_BCM5411
}, /* SLIM_MERLOT */
9784 /* Compaq boards. */
9785 { PCI_VENDOR_ID_COMPAQ
, 0x007c, PHY_ID_BCM5701
}, /* BANSHEE */
9786 { PCI_VENDOR_ID_COMPAQ
, 0x009a, PHY_ID_BCM5701
}, /* BANSHEE_2 */
9787 { PCI_VENDOR_ID_COMPAQ
, 0x007d, 0 }, /* CHANGELING */
9788 { PCI_VENDOR_ID_COMPAQ
, 0x0085, PHY_ID_BCM5701
}, /* NC7780 */
9789 { PCI_VENDOR_ID_COMPAQ
, 0x0099, PHY_ID_BCM5701
}, /* NC7780_2 */
9792 { PCI_VENDOR_ID_IBM
, 0x0281, 0 } /* IBM??? */
9795 static inline struct subsys_tbl_ent
*lookup_by_subsys(struct tg3
*tp
)
9799 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
9800 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
9801 tp
->pdev
->subsystem_vendor
) &&
9802 (subsys_id_to_phy_id
[i
].subsys_devid
==
9803 tp
->pdev
->subsystem_device
))
9804 return &subsys_id_to_phy_id
[i
];
9809 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
9814 /* On some early chips the SRAM cannot be accessed in D3hot state,
9815 * so need make sure we're in D0.
9817 pci_read_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
9818 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
9819 pci_write_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, pmcsr
);
9822 /* Make sure register accesses (indirect or otherwise)
9823 * will function correctly.
9825 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9826 tp
->misc_host_ctrl
);
9828 /* The memory arbiter has to be enabled in order for SRAM accesses
9829 * to succeed. Normally on powerup the tg3 chip firmware will make
9830 * sure it is enabled, but other entities such as system netboot
9831 * code might disable it.
9833 val
= tr32(MEMARB_MODE
);
9834 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
9836 tp
->phy_id
= PHY_ID_INVALID
;
9837 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9839 /* Assume an onboard device by default. */
9840 tp
->tg3_flags
|= TG3_FLAG_EEPROM_WRITE_PROT
;
9842 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
9843 if (!(tr32(PCIE_TRANSACTION_CFG
) & PCIE_TRANS_CFG_LOM
))
9844 tp
->tg3_flags
&= ~TG3_FLAG_EEPROM_WRITE_PROT
;
9848 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
9849 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
9850 u32 nic_cfg
, led_cfg
;
9851 u32 nic_phy_id
, ver
, cfg2
= 0, eeprom_phy_id
;
9852 int eeprom_phy_serdes
= 0;
9854 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
9855 tp
->nic_sram_data_cfg
= nic_cfg
;
9857 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
9858 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
9859 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
) &&
9860 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) &&
9861 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
) &&
9862 (ver
> 0) && (ver
< 0x100))
9863 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
9865 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
9866 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
9867 eeprom_phy_serdes
= 1;
9869 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
9870 if (nic_phy_id
!= 0) {
9871 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
9872 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
9874 eeprom_phy_id
= (id1
>> 16) << 10;
9875 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
9876 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
9880 tp
->phy_id
= eeprom_phy_id
;
9881 if (eeprom_phy_serdes
) {
9882 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
9883 tp
->tg3_flags2
|= TG3_FLG2_MII_SERDES
;
9885 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9888 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9889 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
9890 SHASTA_EXT_LED_MODE_MASK
);
9892 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
9896 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
9897 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9900 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
9901 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
9904 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
9905 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
9907 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9908 * read on some older 5700/5701 bootcode.
9910 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
9912 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
9914 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9918 case SHASTA_EXT_LED_SHARED
:
9919 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
9920 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
9921 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
9922 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
9923 LED_CTRL_MODE_PHY_2
);
9926 case SHASTA_EXT_LED_MAC
:
9927 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
9930 case SHASTA_EXT_LED_COMBO
:
9931 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
9932 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
9933 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
9934 LED_CTRL_MODE_PHY_2
);
9939 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
9940 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
9941 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
9942 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
9944 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
)
9945 tp
->tg3_flags
|= TG3_FLAG_EEPROM_WRITE_PROT
;
9947 tp
->tg3_flags
&= ~TG3_FLAG_EEPROM_WRITE_PROT
;
9949 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
9950 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
9951 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9952 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
9954 if (nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
)
9955 tp
->tg3_flags
|= TG3_FLAG_SERDES_WOL_CAP
;
9957 if (cfg2
& (1 << 17))
9958 tp
->tg3_flags2
|= TG3_FLG2_CAPACITIVE_COUPLING
;
9960 /* serdes signal pre-emphasis in register 0x590 set by */
9961 /* bootcode if bit 18 is set */
9962 if (cfg2
& (1 << 18))
9963 tp
->tg3_flags2
|= TG3_FLG2_SERDES_PREEMPHASIS
;
9967 static int __devinit
tg3_phy_probe(struct tg3
*tp
)
9969 u32 hw_phy_id_1
, hw_phy_id_2
;
9970 u32 hw_phy_id
, hw_phy_id_masked
;
9973 /* Reading the PHY ID register can conflict with ASF
9974 * firwmare access to the PHY hardware.
9977 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
9978 hw_phy_id
= hw_phy_id_masked
= PHY_ID_INVALID
;
9980 /* Now read the physical PHY_ID from the chip and verify
9981 * that it is sane. If it doesn't look good, we fall back
9982 * to either the hard-coded table based PHY_ID and failing
9983 * that the value found in the eeprom area.
9985 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
9986 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
9988 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
9989 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
9990 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
9992 hw_phy_id_masked
= hw_phy_id
& PHY_ID_MASK
;
9995 if (!err
&& KNOWN_PHY_ID(hw_phy_id_masked
)) {
9996 tp
->phy_id
= hw_phy_id
;
9997 if (hw_phy_id_masked
== PHY_ID_BCM8002
)
9998 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
10000 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_SERDES
;
10002 if (tp
->phy_id
!= PHY_ID_INVALID
) {
10003 /* Do nothing, phy ID already set up in
10004 * tg3_get_eeprom_hw_cfg().
10007 struct subsys_tbl_ent
*p
;
10009 /* No eeprom signature? Try the hardcoded
10010 * subsys device table.
10012 p
= lookup_by_subsys(tp
);
10016 tp
->phy_id
= p
->phy_id
;
10018 tp
->phy_id
== PHY_ID_BCM8002
)
10019 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
10023 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) &&
10024 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
10025 u32 bmsr
, adv_reg
, tg3_ctrl
;
10027 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
10028 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
10029 (bmsr
& BMSR_LSTATUS
))
10030 goto skip_phy_reset
;
10032 err
= tg3_phy_reset(tp
);
10036 adv_reg
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
10037 ADVERTISE_100HALF
| ADVERTISE_100FULL
|
10038 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
10040 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
10041 tg3_ctrl
= (MII_TG3_CTRL_ADV_1000_HALF
|
10042 MII_TG3_CTRL_ADV_1000_FULL
);
10043 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
10044 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
10045 tg3_ctrl
|= (MII_TG3_CTRL_AS_MASTER
|
10046 MII_TG3_CTRL_ENABLE_AS_MASTER
);
10049 if (!tg3_copper_is_advertising_all(tp
)) {
10050 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
10052 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
10053 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
10055 tg3_writephy(tp
, MII_BMCR
,
10056 BMCR_ANENABLE
| BMCR_ANRESTART
);
10058 tg3_phy_set_wirespeed(tp
);
10060 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
10061 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
10062 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
10066 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
10067 err
= tg3_init_5401phy_dsp(tp
);
10072 if (!err
&& ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
)) {
10073 err
= tg3_init_5401phy_dsp(tp
);
10076 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
10077 tp
->link_config
.advertising
=
10078 (ADVERTISED_1000baseT_Half
|
10079 ADVERTISED_1000baseT_Full
|
10080 ADVERTISED_Autoneg
|
10082 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
10083 tp
->link_config
.advertising
&=
10084 ~(ADVERTISED_1000baseT_Half
|
10085 ADVERTISED_1000baseT_Full
);
10090 static void __devinit
tg3_read_partno(struct tg3
*tp
)
10092 unsigned char vpd_data
[256];
10096 if (tg3_nvram_read_swab(tp
, 0x0, &magic
))
10097 goto out_not_found
;
10099 if (magic
== TG3_EEPROM_MAGIC
) {
10100 for (i
= 0; i
< 256; i
+= 4) {
10103 if (tg3_nvram_read(tp
, 0x100 + i
, &tmp
))
10104 goto out_not_found
;
10106 vpd_data
[i
+ 0] = ((tmp
>> 0) & 0xff);
10107 vpd_data
[i
+ 1] = ((tmp
>> 8) & 0xff);
10108 vpd_data
[i
+ 2] = ((tmp
>> 16) & 0xff);
10109 vpd_data
[i
+ 3] = ((tmp
>> 24) & 0xff);
10114 vpd_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_VPD
);
10115 for (i
= 0; i
< 256; i
+= 4) {
10119 pci_write_config_word(tp
->pdev
, vpd_cap
+ PCI_VPD_ADDR
,
10121 while (j
++ < 100) {
10122 pci_read_config_word(tp
->pdev
, vpd_cap
+
10123 PCI_VPD_ADDR
, &tmp16
);
10124 if (tmp16
& 0x8000)
10128 if (!(tmp16
& 0x8000))
10129 goto out_not_found
;
10131 pci_read_config_dword(tp
->pdev
, vpd_cap
+ PCI_VPD_DATA
,
10133 tmp
= cpu_to_le32(tmp
);
10134 memcpy(&vpd_data
[i
], &tmp
, 4);
10138 /* Now parse and find the part number. */
10139 for (i
= 0; i
< 256; ) {
10140 unsigned char val
= vpd_data
[i
];
10143 if (val
== 0x82 || val
== 0x91) {
10146 (vpd_data
[i
+ 2] << 8)));
10151 goto out_not_found
;
10153 block_end
= (i
+ 3 +
10155 (vpd_data
[i
+ 2] << 8)));
10157 while (i
< block_end
) {
10158 if (vpd_data
[i
+ 0] == 'P' &&
10159 vpd_data
[i
+ 1] == 'N') {
10160 int partno_len
= vpd_data
[i
+ 2];
10162 if (partno_len
> 24)
10163 goto out_not_found
;
10165 memcpy(tp
->board_part_number
,
10174 /* Part number not found. */
10175 goto out_not_found
;
10179 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
10180 strcpy(tp
->board_part_number
, "BCM95906");
10182 strcpy(tp
->board_part_number
, "none");
10185 static void __devinit
tg3_read_fw_ver(struct tg3
*tp
)
10187 u32 val
, offset
, start
;
10189 if (tg3_nvram_read_swab(tp
, 0, &val
))
10192 if (val
!= TG3_EEPROM_MAGIC
)
10195 if (tg3_nvram_read_swab(tp
, 0xc, &offset
) ||
10196 tg3_nvram_read_swab(tp
, 0x4, &start
))
10199 offset
= tg3_nvram_logical_addr(tp
, offset
);
10200 if (tg3_nvram_read_swab(tp
, offset
, &val
))
10203 if ((val
& 0xfc000000) == 0x0c000000) {
10204 u32 ver_offset
, addr
;
10207 if (tg3_nvram_read_swab(tp
, offset
+ 4, &val
) ||
10208 tg3_nvram_read_swab(tp
, offset
+ 8, &ver_offset
))
10214 addr
= offset
+ ver_offset
- start
;
10215 for (i
= 0; i
< 16; i
+= 4) {
10216 if (tg3_nvram_read(tp
, addr
+ i
, &val
))
10219 val
= cpu_to_le32(val
);
10220 memcpy(tp
->fw_ver
+ i
, &val
, 4);
10225 static int __devinit
tg3_get_invariants(struct tg3
*tp
)
10227 static struct pci_device_id write_reorder_chipsets
[] = {
10228 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
10229 PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
10230 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
10231 PCI_DEVICE_ID_AMD_8131_BRIDGE
) },
10232 { PCI_DEVICE(PCI_VENDOR_ID_VIA
,
10233 PCI_DEVICE_ID_VIA_8385_0
) },
10237 u32 cacheline_sz_reg
;
10238 u32 pci_state_reg
, grc_misc_cfg
;
10243 /* Force memory write invalidate off. If we leave it on,
10244 * then on 5700_BX chips we have to enable a workaround.
10245 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10246 * to match the cacheline size. The Broadcom driver have this
10247 * workaround but turns MWI off all the times so never uses
10248 * it. This seems to suggest that the workaround is insufficient.
10250 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10251 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
10252 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10254 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10255 * has the register indirect write enable bit set before
10256 * we try to access any of the MMIO registers. It is also
10257 * critical that the PCI-X hw workaround situation is decided
10258 * before that as well.
10260 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
10263 tp
->pci_chip_rev_id
= (misc_ctrl_reg
>>
10264 MISC_HOST_CTRL_CHIPREV_SHIFT
);
10266 /* Wrong chip ID in 5752 A0. This code can be removed later
10267 * as A0 is not in production.
10269 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5752_A0_HW
)
10270 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
10272 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10273 * we need to disable memory and use config. cycles
10274 * only to access all registers. The 5702/03 chips
10275 * can mistakenly decode the special cycles from the
10276 * ICH chipsets as memory write cycles, causing corruption
10277 * of register and memory space. Only certain ICH bridges
10278 * will drive special cycles with non-zero data during the
10279 * address phase which can fall within the 5703's address
10280 * range. This is not an ICH bug as the PCI spec allows
10281 * non-zero address during special cycles. However, only
10282 * these ICH bridges are known to drive non-zero addresses
10283 * during special cycles.
10285 * Since special cycles do not cross PCI bridges, we only
10286 * enable this workaround if the 5703 is on the secondary
10287 * bus of these ICH bridges.
10289 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
) ||
10290 (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A2
)) {
10291 static struct tg3_dev_id
{
10295 } ich_chipsets
[] = {
10296 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
10298 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
10300 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
10302 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
10306 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
10307 struct pci_dev
*bridge
= NULL
;
10309 while (pci_id
->vendor
!= 0) {
10310 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
10316 if (pci_id
->rev
!= PCI_ANY_ID
) {
10319 pci_read_config_byte(bridge
, PCI_REVISION_ID
,
10321 if (rev
> pci_id
->rev
)
10324 if (bridge
->subordinate
&&
10325 (bridge
->subordinate
->number
==
10326 tp
->pdev
->bus
->number
)) {
10328 tp
->tg3_flags2
|= TG3_FLG2_ICH_WORKAROUND
;
10329 pci_dev_put(bridge
);
10335 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10336 * DMA addresses > 40-bit. This bridge may have other additional
10337 * 57xx devices behind it in some 4-port NIC designs for example.
10338 * Any tg3 device found behind the bridge will also need the 40-bit
10341 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
||
10342 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
10343 tp
->tg3_flags2
|= TG3_FLG2_5780_CLASS
;
10344 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
10345 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
10348 struct pci_dev
*bridge
= NULL
;
10351 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
10352 PCI_DEVICE_ID_SERVERWORKS_EPB
,
10354 if (bridge
&& bridge
->subordinate
&&
10355 (bridge
->subordinate
->number
<=
10356 tp
->pdev
->bus
->number
) &&
10357 (bridge
->subordinate
->subordinate
>=
10358 tp
->pdev
->bus
->number
)) {
10359 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
10360 pci_dev_put(bridge
);
10366 /* Initialize misc host control in PCI block. */
10367 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
10368 MISC_HOST_CTRL_CHIPREV
);
10369 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
10370 tp
->misc_host_ctrl
);
10372 pci_read_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
10373 &cacheline_sz_reg
);
10375 tp
->pci_cacheline_sz
= (cacheline_sz_reg
>> 0) & 0xff;
10376 tp
->pci_lat_timer
= (cacheline_sz_reg
>> 8) & 0xff;
10377 tp
->pci_hdr_type
= (cacheline_sz_reg
>> 16) & 0xff;
10378 tp
->pci_bist
= (cacheline_sz_reg
>> 24) & 0xff;
10380 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
10381 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
10382 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
10383 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
10384 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
||
10385 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
10386 tp
->tg3_flags2
|= TG3_FLG2_5750_PLUS
;
10388 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) ||
10389 (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
10390 tp
->tg3_flags2
|= TG3_FLG2_5705_PLUS
;
10392 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
10393 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
10394 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
10395 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
10396 tp
->tg3_flags2
|= TG3_FLG2_HW_TSO_2
;
10397 tp
->tg3_flags2
|= TG3_FLG2_1SHOT_MSI
;
10399 tp
->tg3_flags2
|= TG3_FLG2_HW_TSO_1
|
10400 TG3_FLG2_HW_TSO_1_BUG
;
10401 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
10403 tp
->pci_chip_rev_id
>= CHIPREV_ID_5750_C2
)
10404 tp
->tg3_flags2
&= ~TG3_FLG2_HW_TSO_1_BUG
;
10408 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
&&
10409 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5750
&&
10410 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
&&
10411 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5755
&&
10412 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5787
&&
10413 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5906
)
10414 tp
->tg3_flags2
|= TG3_FLG2_JUMBO_CAPABLE
;
10416 if (pci_find_capability(tp
->pdev
, PCI_CAP_ID_EXP
) != 0)
10417 tp
->tg3_flags2
|= TG3_FLG2_PCI_EXPRESS
;
10419 /* If we have an AMD 762 or VIA K8T800 chipset, write
10420 * reordering to the mailbox registers done by the host
10421 * controller can cause major troubles. We read back from
10422 * every mailbox register write to force the writes to be
10423 * posted to the chip in order.
10425 if (pci_dev_present(write_reorder_chipsets
) &&
10426 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
10427 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
10429 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
10430 tp
->pci_lat_timer
< 64) {
10431 tp
->pci_lat_timer
= 64;
10433 cacheline_sz_reg
= ((tp
->pci_cacheline_sz
& 0xff) << 0);
10434 cacheline_sz_reg
|= ((tp
->pci_lat_timer
& 0xff) << 8);
10435 cacheline_sz_reg
|= ((tp
->pci_hdr_type
& 0xff) << 16);
10436 cacheline_sz_reg
|= ((tp
->pci_bist
& 0xff) << 24);
10438 pci_write_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
10442 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
10445 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0) {
10446 tp
->tg3_flags
|= TG3_FLAG_PCIX_MODE
;
10448 /* If this is a 5700 BX chipset, and we are in PCI-X
10449 * mode, enable register write workaround.
10451 * The workaround is to use indirect register accesses
10452 * for all chip writes not to mailbox registers.
10454 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
) {
10458 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
10460 /* The chip can have it's power management PCI config
10461 * space registers clobbered due to this bug.
10462 * So explicitly force the chip into D0 here.
10464 pci_read_config_dword(tp
->pdev
, TG3PCI_PM_CTRL_STAT
,
10466 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
10467 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
10468 pci_write_config_dword(tp
->pdev
, TG3PCI_PM_CTRL_STAT
,
10471 /* Also, force SERR#/PERR# in PCI command. */
10472 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10473 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
10474 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10478 /* 5700 BX chips need to have their TX producer index mailboxes
10479 * written twice to workaround a bug.
10481 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
)
10482 tp
->tg3_flags
|= TG3_FLAG_TXD_MBOX_HWBUG
;
10484 /* Back to back register writes can cause problems on this chip,
10485 * the workaround is to read back all reg writes except those to
10486 * mailbox regs. See tg3_write_indirect_reg32().
10488 * PCI Express 5750_A0 rev chips need this workaround too.
10490 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
10491 ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
10492 tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
))
10493 tp
->tg3_flags
|= TG3_FLAG_5701_REG_WRITE_BUG
;
10495 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
10496 tp
->tg3_flags
|= TG3_FLAG_PCI_HIGH_SPEED
;
10497 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
10498 tp
->tg3_flags
|= TG3_FLAG_PCI_32BIT
;
10500 /* Chip-specific fixup from Broadcom driver */
10501 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
) &&
10502 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
10503 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
10504 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
10507 /* Default fast path register access methods */
10508 tp
->read32
= tg3_read32
;
10509 tp
->write32
= tg3_write32
;
10510 tp
->read32_mbox
= tg3_read32
;
10511 tp
->write32_mbox
= tg3_write32
;
10512 tp
->write32_tx_mbox
= tg3_write32
;
10513 tp
->write32_rx_mbox
= tg3_write32
;
10515 /* Various workaround register access methods */
10516 if (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
)
10517 tp
->write32
= tg3_write_indirect_reg32
;
10518 else if (tp
->tg3_flags
& TG3_FLAG_5701_REG_WRITE_BUG
)
10519 tp
->write32
= tg3_write_flush_reg32
;
10521 if ((tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
) ||
10522 (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)) {
10523 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
10524 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
10525 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
10528 if (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
) {
10529 tp
->read32
= tg3_read_indirect_reg32
;
10530 tp
->write32
= tg3_write_indirect_reg32
;
10531 tp
->read32_mbox
= tg3_read_indirect_mbox
;
10532 tp
->write32_mbox
= tg3_write_indirect_mbox
;
10533 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
10534 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
10539 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10540 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
10541 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10543 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
10544 tp
->read32_mbox
= tg3_read32_mbox_5906
;
10545 tp
->write32_mbox
= tg3_write32_mbox_5906
;
10546 tp
->write32_tx_mbox
= tg3_write32_mbox_5906
;
10547 tp
->write32_rx_mbox
= tg3_write32_mbox_5906
;
10550 if (tp
->write32
== tg3_write_indirect_reg32
||
10551 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) &&
10552 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10553 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)))
10554 tp
->tg3_flags
|= TG3_FLAG_SRAM_USE_CONFIG
;
10556 /* Get eeprom hw config before calling tg3_set_power_state().
10557 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10558 * determined before calling tg3_set_power_state() so that
10559 * we know whether or not to switch out of Vaux power.
10560 * When the flag is set, it means that GPIO1 is used for eeprom
10561 * write protect and also implies that it is a LOM where GPIOs
10562 * are not used to switch power.
10564 tg3_get_eeprom_hw_cfg(tp
);
10566 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10567 * GPIO1 driven high will bring 5700's external PHY out of reset.
10568 * It is also used as eeprom write protect on LOMs.
10570 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
10571 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
10572 (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
10573 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
10574 GRC_LCLCTRL_GPIO_OUTPUT1
);
10575 /* Unused GPIO3 must be driven as output on 5752 because there
10576 * are no pull-up resistors on unused GPIO pins.
10578 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
10579 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
10581 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
10582 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
10584 /* Force the chip into D0. */
10585 err
= tg3_set_power_state(tp
, PCI_D0
);
10587 printk(KERN_ERR PFX
"(%s) transition to D0 failed\n",
10588 pci_name(tp
->pdev
));
10592 /* 5700 B0 chips do not support checksumming correctly due
10593 * to hardware bugs.
10595 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5700_B0
)
10596 tp
->tg3_flags
|= TG3_FLAG_BROKEN_CHECKSUMS
;
10598 /* Derive initial jumbo mode from MTU assigned in
10599 * ether_setup() via the alloc_etherdev() call
10601 if (tp
->dev
->mtu
> ETH_DATA_LEN
&&
10602 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
10603 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
10605 /* Determine WakeOnLan speed to use. */
10606 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10607 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
10608 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
||
10609 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B2
) {
10610 tp
->tg3_flags
&= ~(TG3_FLAG_WOL_SPEED_100MB
);
10612 tp
->tg3_flags
|= TG3_FLAG_WOL_SPEED_100MB
;
10615 /* A few boards don't want Ethernet@WireSpeed phy feature */
10616 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
10617 ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
10618 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) &&
10619 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A1
)) ||
10620 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) ||
10621 (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
))
10622 tp
->tg3_flags2
|= TG3_FLG2_NO_ETH_WIRE_SPEED
;
10624 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5703_AX
||
10625 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_AX
)
10626 tp
->tg3_flags2
|= TG3_FLG2_PHY_ADC_BUG
;
10627 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
)
10628 tp
->tg3_flags2
|= TG3_FLG2_PHY_5704_A0_BUG
;
10630 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
10631 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
10632 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
10633 tp
->tg3_flags2
|= TG3_FLG2_PHY_JITTER_BUG
;
10634 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5906
)
10635 tp
->tg3_flags2
|= TG3_FLG2_PHY_BER_BUG
;
10638 tp
->coalesce_mode
= 0;
10639 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
10640 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
10641 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
10643 /* Initialize MAC MI mode, polling disabled. */
10644 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
10647 /* Initialize data/descriptor byte/word swapping. */
10648 val
= tr32(GRC_MODE
);
10649 val
&= GRC_MODE_HOST_STACKUP
;
10650 tw32(GRC_MODE
, val
| tp
->grc_mode
);
10652 tg3_switch_clocks(tp
);
10654 /* Clear this out for sanity. */
10655 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
10657 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
10659 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
10660 (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) == 0) {
10661 u32 chiprevid
= GET_CHIP_REV_ID(tp
->misc_host_ctrl
);
10663 if (chiprevid
== CHIPREV_ID_5701_A0
||
10664 chiprevid
== CHIPREV_ID_5701_B0
||
10665 chiprevid
== CHIPREV_ID_5701_B2
||
10666 chiprevid
== CHIPREV_ID_5701_B5
) {
10667 void __iomem
*sram_base
;
10669 /* Write some dummy words into the SRAM status block
10670 * area, see if it reads back correctly. If the return
10671 * value is bad, force enable the PCIX workaround.
10673 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
10675 writel(0x00000000, sram_base
);
10676 writel(0x00000000, sram_base
+ 4);
10677 writel(0xffffffff, sram_base
+ 4);
10678 if (readl(sram_base
) != 0x00000000)
10679 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
10684 tg3_nvram_init(tp
);
10686 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
10687 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
10689 /* Broadcom's driver says that CIOBE multisplit has a bug */
10691 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
10692 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5704CIOBE
) {
10693 tp
->tg3_flags
|= TG3_FLAG_SPLIT_MODE
;
10694 tp
->split_mode_max_reqs
= SPLIT_MODE_5704_MAX_REQ
;
10697 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
10698 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
10699 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
10700 tp
->tg3_flags2
|= TG3_FLG2_IS_5788
;
10702 if (!(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
10703 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
))
10704 tp
->tg3_flags
|= TG3_FLAG_TAGGED_STATUS
;
10705 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
10706 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
10707 HOSTCC_MODE_CLRTICK_TXBD
);
10709 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
10710 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
10711 tp
->misc_host_ctrl
);
10714 /* these are limited to 10/100 only */
10715 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
10716 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
10717 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
10718 tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
10719 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901
||
10720 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901_2
||
10721 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5705F
)) ||
10722 (tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
10723 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5751F
||
10724 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5753F
)) ||
10725 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
10726 tp
->tg3_flags
|= TG3_FLAG_10_100_ONLY
;
10728 err
= tg3_phy_probe(tp
);
10730 printk(KERN_ERR PFX
"(%s) phy probe failed, err %d\n",
10731 pci_name(tp
->pdev
), err
);
10732 /* ... but do not return immediately ... */
10735 tg3_read_partno(tp
);
10736 tg3_read_fw_ver(tp
);
10738 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
10739 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
10741 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
10742 tp
->tg3_flags
|= TG3_FLAG_USE_MI_INTERRUPT
;
10744 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
10747 /* 5700 {AX,BX} chips have a broken status block link
10748 * change bit implementation, so we must use the
10749 * status register in those cases.
10751 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
10752 tp
->tg3_flags
|= TG3_FLAG_USE_LINKCHG_REG
;
10754 tp
->tg3_flags
&= ~TG3_FLAG_USE_LINKCHG_REG
;
10756 /* The led_ctrl is set during tg3_phy_probe, here we might
10757 * have to force the link status polling mechanism based
10758 * upon subsystem IDs.
10760 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
10761 !(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
10762 tp
->tg3_flags
|= (TG3_FLAG_USE_MI_INTERRUPT
|
10763 TG3_FLAG_USE_LINKCHG_REG
);
10766 /* For all SERDES we poll the MAC status register. */
10767 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
10768 tp
->tg3_flags
|= TG3_FLAG_POLL_SERDES
;
10770 tp
->tg3_flags
&= ~TG3_FLAG_POLL_SERDES
;
10772 /* All chips before 5787 can get confused if TX buffers
10773 * straddle the 4GB address boundary in some cases.
10775 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
10776 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
10777 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
10778 tp
->dev
->hard_start_xmit
= tg3_start_xmit
;
10780 tp
->dev
->hard_start_xmit
= tg3_start_xmit_dma_bug
;
10783 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
10784 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0)
10787 tp
->rx_std_max_post
= TG3_RX_RING_SIZE
;
10789 /* Increment the rx prod index on the rx std ring by at most
10790 * 8 for these chips to workaround hw errata.
10792 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
10793 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
10794 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
10795 tp
->rx_std_max_post
= 8;
10797 /* By default, disable wake-on-lan. User can change this
10798 * using ETHTOOL_SWOL.
10800 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
10805 #ifdef CONFIG_SPARC64
10806 static int __devinit
tg3_get_macaddr_sparc(struct tg3
*tp
)
10808 struct net_device
*dev
= tp
->dev
;
10809 struct pci_dev
*pdev
= tp
->pdev
;
10810 struct pcidev_cookie
*pcp
= pdev
->sysdata
;
10813 unsigned char *addr
;
10816 addr
= of_get_property(pcp
->prom_node
, "local-mac-address",
10818 if (addr
&& len
== 6) {
10819 memcpy(dev
->dev_addr
, addr
, 6);
10820 memcpy(dev
->perm_addr
, dev
->dev_addr
, 6);
10827 static int __devinit
tg3_get_default_macaddr_sparc(struct tg3
*tp
)
10829 struct net_device
*dev
= tp
->dev
;
10831 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
10832 memcpy(dev
->perm_addr
, idprom
->id_ethaddr
, 6);
10837 static int __devinit
tg3_get_device_address(struct tg3
*tp
)
10839 struct net_device
*dev
= tp
->dev
;
10840 u32 hi
, lo
, mac_offset
;
10843 #ifdef CONFIG_SPARC64
10844 if (!tg3_get_macaddr_sparc(tp
))
10849 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
10850 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
10851 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
10853 if (tg3_nvram_lock(tp
))
10854 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
10856 tg3_nvram_unlock(tp
);
10858 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
)
10861 /* First try to get it from MAC address mailbox. */
10862 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
10863 if ((hi
>> 16) == 0x484b) {
10864 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
10865 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
10867 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
10868 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
10869 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
10870 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
10871 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
10873 /* Some old bootcode may report a 0 MAC address in SRAM */
10874 addr_ok
= is_valid_ether_addr(&dev
->dev_addr
[0]);
10877 /* Next, try NVRAM. */
10878 if (!tg3_nvram_read(tp
, mac_offset
+ 0, &hi
) &&
10879 !tg3_nvram_read(tp
, mac_offset
+ 4, &lo
)) {
10880 dev
->dev_addr
[0] = ((hi
>> 16) & 0xff);
10881 dev
->dev_addr
[1] = ((hi
>> 24) & 0xff);
10882 dev
->dev_addr
[2] = ((lo
>> 0) & 0xff);
10883 dev
->dev_addr
[3] = ((lo
>> 8) & 0xff);
10884 dev
->dev_addr
[4] = ((lo
>> 16) & 0xff);
10885 dev
->dev_addr
[5] = ((lo
>> 24) & 0xff);
10887 /* Finally just fetch it out of the MAC control regs. */
10889 hi
= tr32(MAC_ADDR_0_HIGH
);
10890 lo
= tr32(MAC_ADDR_0_LOW
);
10892 dev
->dev_addr
[5] = lo
& 0xff;
10893 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
10894 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
10895 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
10896 dev
->dev_addr
[1] = hi
& 0xff;
10897 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
10901 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
10902 #ifdef CONFIG_SPARC64
10903 if (!tg3_get_default_macaddr_sparc(tp
))
10908 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
10912 #define BOUNDARY_SINGLE_CACHELINE 1
10913 #define BOUNDARY_MULTI_CACHELINE 2
10915 static u32 __devinit
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
10917 int cacheline_size
;
10921 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
10923 cacheline_size
= 1024;
10925 cacheline_size
= (int) byte
* 4;
10927 /* On 5703 and later chips, the boundary bits have no
10930 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
10931 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
10932 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
10935 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10936 goal
= BOUNDARY_MULTI_CACHELINE
;
10938 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10939 goal
= BOUNDARY_SINGLE_CACHELINE
;
10948 /* PCI controllers on most RISC systems tend to disconnect
10949 * when a device tries to burst across a cache-line boundary.
10950 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10952 * Unfortunately, for PCI-E there are only limited
10953 * write-side controls for this, and thus for reads
10954 * we will still get the disconnects. We'll also waste
10955 * these PCI cycles for both read and write for chips
10956 * other than 5700 and 5701 which do not implement the
10959 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) &&
10960 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
10961 switch (cacheline_size
) {
10966 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10967 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
10968 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
10970 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
10971 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
10976 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
10977 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
10981 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
10982 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
10985 } else if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10986 switch (cacheline_size
) {
10990 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10991 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
10992 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
10998 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
10999 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
11003 switch (cacheline_size
) {
11005 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
11006 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
11007 DMA_RWCTRL_WRITE_BNDRY_16
);
11012 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
11013 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
11014 DMA_RWCTRL_WRITE_BNDRY_32
);
11019 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
11020 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
11021 DMA_RWCTRL_WRITE_BNDRY_64
);
11026 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
11027 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
11028 DMA_RWCTRL_WRITE_BNDRY_128
);
11033 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
11034 DMA_RWCTRL_WRITE_BNDRY_256
);
11037 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
11038 DMA_RWCTRL_WRITE_BNDRY_512
);
11042 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
11043 DMA_RWCTRL_WRITE_BNDRY_1024
);
11052 static int __devinit
tg3_do_test_dma(struct tg3
*tp
, u32
*buf
, dma_addr_t buf_dma
, int size
, int to_device
)
11054 struct tg3_internal_buffer_desc test_desc
;
11055 u32 sram_dma_descs
;
11058 sram_dma_descs
= NIC_SRAM_DMA_DESC_POOL_BASE
;
11060 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
, 0);
11061 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
, 0);
11062 tw32(RDMAC_STATUS
, 0);
11063 tw32(WDMAC_STATUS
, 0);
11065 tw32(BUFMGR_MODE
, 0);
11066 tw32(FTQ_RESET
, 0);
11068 test_desc
.addr_hi
= ((u64
) buf_dma
) >> 32;
11069 test_desc
.addr_lo
= buf_dma
& 0xffffffff;
11070 test_desc
.nic_mbuf
= 0x00002100;
11071 test_desc
.len
= size
;
11074 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
11075 * the *second* time the tg3 driver was getting loaded after an
11078 * Broadcom tells me:
11079 * ...the DMA engine is connected to the GRC block and a DMA
11080 * reset may affect the GRC block in some unpredictable way...
11081 * The behavior of resets to individual blocks has not been tested.
11083 * Broadcom noted the GRC reset will also reset all sub-components.
11086 test_desc
.cqid_sqid
= (13 << 8) | 2;
11088 tw32_f(RDMAC_MODE
, RDMAC_MODE_ENABLE
);
11091 test_desc
.cqid_sqid
= (16 << 8) | 7;
11093 tw32_f(WDMAC_MODE
, WDMAC_MODE_ENABLE
);
11096 test_desc
.flags
= 0x00000005;
11098 for (i
= 0; i
< (sizeof(test_desc
) / sizeof(u32
)); i
++) {
11101 val
= *(((u32
*)&test_desc
) + i
);
11102 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
,
11103 sram_dma_descs
+ (i
* sizeof(u32
)));
11104 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
11106 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
11109 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ
, sram_dma_descs
);
11111 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ
, sram_dma_descs
);
11115 for (i
= 0; i
< 40; i
++) {
11119 val
= tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
);
11121 val
= tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
);
11122 if ((val
& 0xffff) == sram_dma_descs
) {
11133 #define TEST_BUFFER_SIZE 0x2000
11135 static int __devinit
tg3_test_dma(struct tg3
*tp
)
11137 dma_addr_t buf_dma
;
11138 u32
*buf
, saved_dma_rwctrl
;
11141 buf
= pci_alloc_consistent(tp
->pdev
, TEST_BUFFER_SIZE
, &buf_dma
);
11147 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
11148 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
11150 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
11152 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
11153 /* DMA read watermark not used on PCIE */
11154 tp
->dma_rwctrl
|= 0x00180000;
11155 } else if (!(tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
11156 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
11157 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
11158 tp
->dma_rwctrl
|= 0x003f0000;
11160 tp
->dma_rwctrl
|= 0x003f000f;
11162 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
11163 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
11164 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
11166 /* If the 5704 is behind the EPB bridge, we can
11167 * do the less restrictive ONE_DMA workaround for
11168 * better performance.
11170 if ((tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) &&
11171 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
11172 tp
->dma_rwctrl
|= 0x8000;
11173 else if (ccval
== 0x6 || ccval
== 0x7)
11174 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
11176 /* Set bit 23 to enable PCIX hw bug fix */
11177 tp
->dma_rwctrl
|= 0x009f0000;
11178 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
11179 /* 5780 always in PCIX mode */
11180 tp
->dma_rwctrl
|= 0x00144000;
11181 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
11182 /* 5714 always in PCIX mode */
11183 tp
->dma_rwctrl
|= 0x00148000;
11185 tp
->dma_rwctrl
|= 0x001b000f;
11189 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
11190 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
11191 tp
->dma_rwctrl
&= 0xfffffff0;
11193 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
11194 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
11195 /* Remove this if it causes problems for some boards. */
11196 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
11198 /* On 5700/5701 chips, we need to set this bit.
11199 * Otherwise the chip will issue cacheline transactions
11200 * to streamable DMA memory with not all the byte
11201 * enables turned on. This is an error on several
11202 * RISC PCI controllers, in particular sparc64.
11204 * On 5703/5704 chips, this bit has been reassigned
11205 * a different meaning. In particular, it is used
11206 * on those chips to enable a PCI-X workaround.
11208 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
11211 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
11214 /* Unneeded, already done by tg3_get_invariants. */
11215 tg3_switch_clocks(tp
);
11219 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
11220 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
11223 /* It is best to perform DMA test with maximum write burst size
11224 * to expose the 5700/5701 write DMA bug.
11226 saved_dma_rwctrl
= tp
->dma_rwctrl
;
11227 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
11228 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
11233 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
11236 /* Send the buffer to the chip. */
11237 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 1);
11239 printk(KERN_ERR
"tg3_test_dma() Write the buffer failed %d\n", ret
);
11244 /* validate data reached card RAM correctly. */
11245 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
11247 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
11248 if (le32_to_cpu(val
) != p
[i
]) {
11249 printk(KERN_ERR
" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val
, i
);
11250 /* ret = -ENODEV here? */
11255 /* Now read it back. */
11256 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 0);
11258 printk(KERN_ERR
"tg3_test_dma() Read the buffer failed %d\n", ret
);
11264 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
11268 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
11269 DMA_RWCTRL_WRITE_BNDRY_16
) {
11270 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
11271 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
11272 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
11275 printk(KERN_ERR
"tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p
[i
], i
);
11281 if (i
== (TEST_BUFFER_SIZE
/ sizeof(u32
))) {
11287 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
11288 DMA_RWCTRL_WRITE_BNDRY_16
) {
11289 static struct pci_device_id dma_wait_state_chipsets
[] = {
11290 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
,
11291 PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
11295 /* DMA test passed without adjusting DMA boundary,
11296 * now look for chipsets that are known to expose the
11297 * DMA bug without failing the test.
11299 if (pci_dev_present(dma_wait_state_chipsets
)) {
11300 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
11301 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
11304 /* Safe to use the calculated DMA boundary. */
11305 tp
->dma_rwctrl
= saved_dma_rwctrl
;
11307 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
11311 pci_free_consistent(tp
->pdev
, TEST_BUFFER_SIZE
, buf
, buf_dma
);
11316 static void __devinit
tg3_init_link_config(struct tg3
*tp
)
11318 tp
->link_config
.advertising
=
11319 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
11320 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
11321 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
11322 ADVERTISED_Autoneg
| ADVERTISED_MII
);
11323 tp
->link_config
.speed
= SPEED_INVALID
;
11324 tp
->link_config
.duplex
= DUPLEX_INVALID
;
11325 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
11326 tp
->link_config
.active_speed
= SPEED_INVALID
;
11327 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
11328 tp
->link_config
.phy_is_low_power
= 0;
11329 tp
->link_config
.orig_speed
= SPEED_INVALID
;
11330 tp
->link_config
.orig_duplex
= DUPLEX_INVALID
;
11331 tp
->link_config
.orig_autoneg
= AUTONEG_INVALID
;
11334 static void __devinit
tg3_init_bufmgr_config(struct tg3
*tp
)
11336 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
11337 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
11338 DEFAULT_MB_RDMA_LOW_WATER_5705
;
11339 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
11340 DEFAULT_MB_MACRX_LOW_WATER_5705
;
11341 tp
->bufmgr_config
.mbuf_high_water
=
11342 DEFAULT_MB_HIGH_WATER_5705
;
11343 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5906
) {
11344 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
11345 DEFAULT_MB_MACRX_LOW_WATER_5906
;
11346 tp
->bufmgr_config
.mbuf_high_water
=
11347 DEFAULT_MB_HIGH_WATER_5906
;
11350 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
11351 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
11352 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
11353 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
11354 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
11355 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
11357 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
11358 DEFAULT_MB_RDMA_LOW_WATER
;
11359 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
11360 DEFAULT_MB_MACRX_LOW_WATER
;
11361 tp
->bufmgr_config
.mbuf_high_water
=
11362 DEFAULT_MB_HIGH_WATER
;
11364 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
11365 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
11366 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
11367 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
11368 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
11369 DEFAULT_MB_HIGH_WATER_JUMBO
;
11372 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
11373 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
11376 static char * __devinit
tg3_phy_string(struct tg3
*tp
)
11378 switch (tp
->phy_id
& PHY_ID_MASK
) {
11379 case PHY_ID_BCM5400
: return "5400";
11380 case PHY_ID_BCM5401
: return "5401";
11381 case PHY_ID_BCM5411
: return "5411";
11382 case PHY_ID_BCM5701
: return "5701";
11383 case PHY_ID_BCM5703
: return "5703";
11384 case PHY_ID_BCM5704
: return "5704";
11385 case PHY_ID_BCM5705
: return "5705";
11386 case PHY_ID_BCM5750
: return "5750";
11387 case PHY_ID_BCM5752
: return "5752";
11388 case PHY_ID_BCM5714
: return "5714";
11389 case PHY_ID_BCM5780
: return "5780";
11390 case PHY_ID_BCM5755
: return "5755";
11391 case PHY_ID_BCM5787
: return "5787";
11392 case PHY_ID_BCM5756
: return "5722/5756";
11393 case PHY_ID_BCM5906
: return "5906";
11394 case PHY_ID_BCM8002
: return "8002/serdes";
11395 case 0: return "serdes";
11396 default: return "unknown";
11400 static char * __devinit
tg3_bus_string(struct tg3
*tp
, char *str
)
11402 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
11403 strcpy(str
, "PCI Express");
11405 } else if (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) {
11406 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
) & 0x1f;
11408 strcpy(str
, "PCIX:");
11410 if ((clock_ctrl
== 7) ||
11411 ((tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
) ==
11412 GRC_MISC_CFG_BOARD_ID_5704CIOBE
))
11413 strcat(str
, "133MHz");
11414 else if (clock_ctrl
== 0)
11415 strcat(str
, "33MHz");
11416 else if (clock_ctrl
== 2)
11417 strcat(str
, "50MHz");
11418 else if (clock_ctrl
== 4)
11419 strcat(str
, "66MHz");
11420 else if (clock_ctrl
== 6)
11421 strcat(str
, "100MHz");
11423 strcpy(str
, "PCI:");
11424 if (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
)
11425 strcat(str
, "66MHz");
11427 strcat(str
, "33MHz");
11429 if (tp
->tg3_flags
& TG3_FLAG_PCI_32BIT
)
11430 strcat(str
, ":32-bit");
11432 strcat(str
, ":64-bit");
11436 static struct pci_dev
* __devinit
tg3_find_peer(struct tg3
*tp
)
11438 struct pci_dev
*peer
;
11439 unsigned int func
, devnr
= tp
->pdev
->devfn
& ~7;
11441 for (func
= 0; func
< 8; func
++) {
11442 peer
= pci_get_slot(tp
->pdev
->bus
, devnr
| func
);
11443 if (peer
&& peer
!= tp
->pdev
)
11447 /* 5704 can be configured in single-port mode, set peer to
11448 * tp->pdev in that case.
11456 * We don't need to keep the refcount elevated; there's no way
11457 * to remove one half of this device without removing the other
11464 static void __devinit
tg3_init_coal(struct tg3
*tp
)
11466 struct ethtool_coalesce
*ec
= &tp
->coal
;
11468 memset(ec
, 0, sizeof(*ec
));
11469 ec
->cmd
= ETHTOOL_GCOALESCE
;
11470 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
11471 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
11472 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
11473 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
11474 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
11475 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
11476 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
11477 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
11478 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
11480 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
11481 HOSTCC_MODE_CLRTICK_TXBD
)) {
11482 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
11483 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
11484 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
11485 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
11488 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
11489 ec
->rx_coalesce_usecs_irq
= 0;
11490 ec
->tx_coalesce_usecs_irq
= 0;
11491 ec
->stats_block_coalesce_usecs
= 0;
11495 static int __devinit
tg3_init_one(struct pci_dev
*pdev
,
11496 const struct pci_device_id
*ent
)
11498 static int tg3_version_printed
= 0;
11499 unsigned long tg3reg_base
, tg3reg_len
;
11500 struct net_device
*dev
;
11502 int i
, err
, pm_cap
;
11504 u64 dma_mask
, persist_dma_mask
;
11506 if (tg3_version_printed
++ == 0)
11507 printk(KERN_INFO
"%s", version
);
11509 err
= pci_enable_device(pdev
);
11511 printk(KERN_ERR PFX
"Cannot enable PCI device, "
11516 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
11517 printk(KERN_ERR PFX
"Cannot find proper PCI device "
11518 "base address, aborting.\n");
11520 goto err_out_disable_pdev
;
11523 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
11525 printk(KERN_ERR PFX
"Cannot obtain PCI resources, "
11527 goto err_out_disable_pdev
;
11530 pci_set_master(pdev
);
11532 /* Find power-management capability. */
11533 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
11535 printk(KERN_ERR PFX
"Cannot find PowerManagement capability, "
11538 goto err_out_free_res
;
11541 tg3reg_base
= pci_resource_start(pdev
, 0);
11542 tg3reg_len
= pci_resource_len(pdev
, 0);
11544 dev
= alloc_etherdev(sizeof(*tp
));
11546 printk(KERN_ERR PFX
"Etherdev alloc failed, aborting.\n");
11548 goto err_out_free_res
;
11551 SET_MODULE_OWNER(dev
);
11552 SET_NETDEV_DEV(dev
, &pdev
->dev
);
11554 #if TG3_VLAN_TAG_USED
11555 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
11556 dev
->vlan_rx_register
= tg3_vlan_rx_register
;
11557 dev
->vlan_rx_kill_vid
= tg3_vlan_rx_kill_vid
;
11560 tp
= netdev_priv(dev
);
11563 tp
->pm_cap
= pm_cap
;
11564 tp
->mac_mode
= TG3_DEF_MAC_MODE
;
11565 tp
->rx_mode
= TG3_DEF_RX_MODE
;
11566 tp
->tx_mode
= TG3_DEF_TX_MODE
;
11567 tp
->mi_mode
= MAC_MI_MODE_BASE
;
11569 tp
->msg_enable
= tg3_debug
;
11571 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
11573 /* The word/byte swap controls here control register access byte
11574 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11577 tp
->misc_host_ctrl
=
11578 MISC_HOST_CTRL_MASK_PCI_INT
|
11579 MISC_HOST_CTRL_WORD_SWAP
|
11580 MISC_HOST_CTRL_INDIR_ACCESS
|
11581 MISC_HOST_CTRL_PCISTATE_RW
;
11583 /* The NONFRM (non-frame) byte/word swap controls take effect
11584 * on descriptor entries, anything which isn't packet data.
11586 * The StrongARM chips on the board (one for tx, one for rx)
11587 * are running in big-endian mode.
11589 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
11590 GRC_MODE_WSWAP_NONFRM_DATA
);
11591 #ifdef __BIG_ENDIAN
11592 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
11594 spin_lock_init(&tp
->lock
);
11595 spin_lock_init(&tp
->indirect_lock
);
11596 INIT_WORK(&tp
->reset_task
, tg3_reset_task
, tp
);
11598 tp
->regs
= ioremap_nocache(tg3reg_base
, tg3reg_len
);
11599 if (tp
->regs
== 0UL) {
11600 printk(KERN_ERR PFX
"Cannot map device registers, "
11603 goto err_out_free_dev
;
11606 tg3_init_link_config(tp
);
11608 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
11609 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
11610 tp
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
11612 dev
->open
= tg3_open
;
11613 dev
->stop
= tg3_close
;
11614 dev
->get_stats
= tg3_get_stats
;
11615 dev
->set_multicast_list
= tg3_set_rx_mode
;
11616 dev
->set_mac_address
= tg3_set_mac_addr
;
11617 dev
->do_ioctl
= tg3_ioctl
;
11618 dev
->tx_timeout
= tg3_tx_timeout
;
11619 dev
->poll
= tg3_poll
;
11620 dev
->ethtool_ops
= &tg3_ethtool_ops
;
11622 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
11623 dev
->change_mtu
= tg3_change_mtu
;
11624 dev
->irq
= pdev
->irq
;
11625 #ifdef CONFIG_NET_POLL_CONTROLLER
11626 dev
->poll_controller
= tg3_poll_controller
;
11629 err
= tg3_get_invariants(tp
);
11631 printk(KERN_ERR PFX
"Problem fetching invariants of chip, "
11633 goto err_out_iounmap
;
11636 /* The EPB bridge inside 5714, 5715, and 5780 and any
11637 * device behind the EPB cannot support DMA addresses > 40-bit.
11638 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11639 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11640 * do DMA address check in tg3_start_xmit().
11642 if (tp
->tg3_flags2
& TG3_FLG2_IS_5788
)
11643 persist_dma_mask
= dma_mask
= DMA_32BIT_MASK
;
11644 else if (tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) {
11645 persist_dma_mask
= dma_mask
= DMA_40BIT_MASK
;
11646 #ifdef CONFIG_HIGHMEM
11647 dma_mask
= DMA_64BIT_MASK
;
11650 persist_dma_mask
= dma_mask
= DMA_64BIT_MASK
;
11652 /* Configure DMA attributes. */
11653 if (dma_mask
> DMA_32BIT_MASK
) {
11654 err
= pci_set_dma_mask(pdev
, dma_mask
);
11656 dev
->features
|= NETIF_F_HIGHDMA
;
11657 err
= pci_set_consistent_dma_mask(pdev
,
11660 printk(KERN_ERR PFX
"Unable to obtain 64 bit "
11661 "DMA for consistent allocations\n");
11662 goto err_out_iounmap
;
11666 if (err
|| dma_mask
== DMA_32BIT_MASK
) {
11667 err
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
);
11669 printk(KERN_ERR PFX
"No usable DMA configuration, "
11671 goto err_out_iounmap
;
11675 tg3_init_bufmgr_config(tp
);
11677 #if TG3_TSO_SUPPORT != 0
11678 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
11679 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
11681 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
11682 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
11683 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
||
11684 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
11685 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
11687 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
11690 /* TSO is on by default on chips that support hardware TSO.
11691 * Firmware TSO on older chips gives lower performance, so it
11692 * is off by default, but can be enabled using ethtool.
11694 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
11695 dev
->features
|= NETIF_F_TSO
;
11696 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO_2
) &&
11697 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5906
))
11698 dev
->features
|= NETIF_F_TSO6
;
11703 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
&&
11704 !(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) &&
11705 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
)) {
11706 tp
->tg3_flags2
|= TG3_FLG2_MAX_RXPEND_64
;
11707 tp
->rx_pending
= 63;
11710 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
11711 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
))
11712 tp
->pdev_peer
= tg3_find_peer(tp
);
11714 err
= tg3_get_device_address(tp
);
11716 printk(KERN_ERR PFX
"Could not obtain valid ethernet address, "
11718 goto err_out_iounmap
;
11722 * Reset chip in case UNDI or EFI driver did not shutdown
11723 * DMA self test will enable WDMAC and we'll see (spurious)
11724 * pending DMA on the PCI bus at that point.
11726 if ((tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
) ||
11727 (tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
11728 pci_save_state(tp
->pdev
);
11729 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
11730 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11733 err
= tg3_test_dma(tp
);
11735 printk(KERN_ERR PFX
"DMA engine test failed, aborting.\n");
11736 goto err_out_iounmap
;
11739 /* Tigon3 can do ipv4 only... and some chips have buggy
11742 if ((tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) == 0) {
11743 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
11744 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
11745 dev
->features
|= NETIF_F_HW_CSUM
;
11747 dev
->features
|= NETIF_F_IP_CSUM
;
11748 dev
->features
|= NETIF_F_SG
;
11749 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
11751 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
11753 /* flow control autonegotiation is default behavior */
11754 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
11758 /* Now that we have fully setup the chip, save away a snapshot
11759 * of the PCI config space. We need to restore this after
11760 * GRC_MISC_CFG core clock resets and some resume events.
11762 pci_save_state(tp
->pdev
);
11764 err
= register_netdev(dev
);
11766 printk(KERN_ERR PFX
"Cannot register net device, "
11768 goto err_out_iounmap
;
11771 pci_set_drvdata(pdev
, dev
);
11773 printk(KERN_INFO
"%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11775 tp
->board_part_number
,
11776 tp
->pci_chip_rev_id
,
11777 tg3_phy_string(tp
),
11778 tg3_bus_string(tp
, str
),
11779 (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) ? "10/100" : "10/100/1000");
11781 for (i
= 0; i
< 6; i
++)
11782 printk("%2.2x%c", dev
->dev_addr
[i
],
11783 i
== 5 ? '\n' : ':');
11785 printk(KERN_INFO
"%s: RXcsums[%d] LinkChgREG[%d] "
11786 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11789 (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0,
11790 (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) != 0,
11791 (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
) != 0,
11792 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0,
11793 (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
) != 0,
11794 (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
) == 0,
11795 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) != 0);
11796 printk(KERN_INFO
"%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11797 dev
->name
, tp
->dma_rwctrl
,
11798 (pdev
->dma_mask
== DMA_32BIT_MASK
) ? 32 :
11799 (((u64
) pdev
->dma_mask
== DMA_40BIT_MASK
) ? 40 : 64));
11801 netif_carrier_off(tp
->dev
);
11815 pci_release_regions(pdev
);
11817 err_out_disable_pdev
:
11818 pci_disable_device(pdev
);
11819 pci_set_drvdata(pdev
, NULL
);
11823 static void __devexit
tg3_remove_one(struct pci_dev
*pdev
)
11825 struct net_device
*dev
= pci_get_drvdata(pdev
);
11828 struct tg3
*tp
= netdev_priv(dev
);
11830 flush_scheduled_work();
11831 unregister_netdev(dev
);
11837 pci_release_regions(pdev
);
11838 pci_disable_device(pdev
);
11839 pci_set_drvdata(pdev
, NULL
);
11843 static int tg3_suspend(struct pci_dev
*pdev
, pm_message_t state
)
11845 struct net_device
*dev
= pci_get_drvdata(pdev
);
11846 struct tg3
*tp
= netdev_priv(dev
);
11849 if (!netif_running(dev
))
11852 flush_scheduled_work();
11853 tg3_netif_stop(tp
);
11855 del_timer_sync(&tp
->timer
);
11857 tg3_full_lock(tp
, 1);
11858 tg3_disable_ints(tp
);
11859 tg3_full_unlock(tp
);
11861 netif_device_detach(dev
);
11863 tg3_full_lock(tp
, 0);
11864 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11865 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
11866 tg3_full_unlock(tp
);
11868 err
= tg3_set_power_state(tp
, pci_choose_state(pdev
, state
));
11870 tg3_full_lock(tp
, 0);
11872 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
11873 if (tg3_restart_hw(tp
, 1))
11876 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
11877 add_timer(&tp
->timer
);
11879 netif_device_attach(dev
);
11880 tg3_netif_start(tp
);
11883 tg3_full_unlock(tp
);
11889 static int tg3_resume(struct pci_dev
*pdev
)
11891 struct net_device
*dev
= pci_get_drvdata(pdev
);
11892 struct tg3
*tp
= netdev_priv(dev
);
11895 if (!netif_running(dev
))
11898 pci_restore_state(tp
->pdev
);
11900 err
= tg3_set_power_state(tp
, PCI_D0
);
11904 netif_device_attach(dev
);
11906 tg3_full_lock(tp
, 0);
11908 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
11909 err
= tg3_restart_hw(tp
, 1);
11913 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
11914 add_timer(&tp
->timer
);
11916 tg3_netif_start(tp
);
11919 tg3_full_unlock(tp
);
11924 static struct pci_driver tg3_driver
= {
11925 .name
= DRV_MODULE_NAME
,
11926 .id_table
= tg3_pci_tbl
,
11927 .probe
= tg3_init_one
,
11928 .remove
= __devexit_p(tg3_remove_one
),
11929 .suspend
= tg3_suspend
,
11930 .resume
= tg3_resume
11933 static int __init
tg3_init(void)
11935 return pci_register_driver(&tg3_driver
);
11938 static void __exit
tg3_cleanup(void)
11940 pci_unregister_driver(&tg3_driver
);
11943 module_init(tg3_init
);
11944 module_exit(tg3_cleanup
);