2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/if_vlan.h>
37 #include <linux/tcp.h>
38 #include <linux/workqueue.h>
39 #include <linux/prefetch.h>
40 #include <linux/dma-mapping.h>
42 #include <net/checksum.h>
44 #include <asm/system.h>
46 #include <asm/byteorder.h>
47 #include <asm/uaccess.h>
50 #include <asm/idprom.h>
51 #include <asm/oplib.h>
55 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56 #define TG3_VLAN_TAG_USED 1
58 #define TG3_VLAN_TAG_USED 0
62 #define TG3_TSO_SUPPORT 1
64 #define TG3_TSO_SUPPORT 0
69 #define DRV_MODULE_NAME "tg3"
70 #define PFX DRV_MODULE_NAME ": "
71 #define DRV_MODULE_VERSION "3.65"
72 #define DRV_MODULE_RELDATE "August 07, 2006"
74 #define TG3_DEF_MAC_MODE 0
75 #define TG3_DEF_RX_MODE 0
76 #define TG3_DEF_TX_MODE 0
77 #define TG3_DEF_MSG_ENABLE \
87 /* length of time before we decide the hardware is borked,
88 * and dev->tx_timeout() should be called to fix the problem
90 #define TG3_TX_TIMEOUT (5 * HZ)
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU 60
94 #define TG3_MAX_MTU(tp) \
95 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98 * You can't change the ring sizes, but you can change where you place
99 * them in the NIC onboard memory.
101 #define TG3_RX_RING_SIZE 512
102 #define TG3_DEF_RX_RING_PENDING 200
103 #define TG3_RX_JUMBO_RING_SIZE 256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
106 /* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et
108 * al. operations are done with shifts and masks instead of with
109 * hw multiply/modulo instructions. Another solution would be to
110 * replace things like '% foo' with '& (foo - 1)'.
112 #define TG3_RX_RCB_RING_SIZE(tp) \
113 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
115 #define TG3_TX_RING_SIZE 512
116 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
118 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123 TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
126 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137 #define TG3_NUM_TEST 6
139 static char version
[] __devinitdata
=
140 DRV_MODULE_NAME
".c:v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
142 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
143 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
144 MODULE_LICENSE("GPL");
145 MODULE_VERSION(DRV_MODULE_VERSION
);
147 static int tg3_debug
= -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
148 module_param(tg3_debug
, int, 0);
149 MODULE_PARM_DESC(tg3_debug
, "Tigon3 bitmapped debugging message enable value");
151 static struct pci_device_id tg3_pci_tbl
[] = {
152 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5700
)},
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5701
)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702
)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703
)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704
)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702FE
)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705
)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705_2
)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M
)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705M_2
)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702X
)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703X
)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S
)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5702A3
)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5703A3
)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5782
)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5788
)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5789
)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901
)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5901_2
)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5704S_2
)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5705F
)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5720
)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5721
)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5722
)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750
)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751
)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5750M
)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751M
)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5751F
)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752
)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5752M
)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753
)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753M
)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5753F
)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754
)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5754M
)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755
)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5755M
)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5756
)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5786
)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787
)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5787M
)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714
)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5714S
)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715
)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5715S
)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780
)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5780S
)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_TIGON3_5781
)},
202 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9DXX
)},
203 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT
, PCI_DEVICE_ID_SYSKONNECT_9MXX
)},
204 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1000
)},
205 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1001
)},
206 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC1003
)},
207 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA
, PCI_DEVICE_ID_ALTIMA_AC9100
)},
208 {PCI_DEVICE(PCI_VENDOR_ID_APPLE
, PCI_DEVICE_ID_APPLE_TIGON3
)},
212 MODULE_DEVICE_TABLE(pci
, tg3_pci_tbl
);
214 static const struct {
215 const char string
[ETH_GSTRING_LEN
];
216 } ethtool_stats_keys
[TG3_NUM_STATS
] = {
219 { "rx_ucast_packets" },
220 { "rx_mcast_packets" },
221 { "rx_bcast_packets" },
223 { "rx_align_errors" },
224 { "rx_xon_pause_rcvd" },
225 { "rx_xoff_pause_rcvd" },
226 { "rx_mac_ctrl_rcvd" },
227 { "rx_xoff_entered" },
228 { "rx_frame_too_long_errors" },
230 { "rx_undersize_packets" },
231 { "rx_in_length_errors" },
232 { "rx_out_length_errors" },
233 { "rx_64_or_less_octet_packets" },
234 { "rx_65_to_127_octet_packets" },
235 { "rx_128_to_255_octet_packets" },
236 { "rx_256_to_511_octet_packets" },
237 { "rx_512_to_1023_octet_packets" },
238 { "rx_1024_to_1522_octet_packets" },
239 { "rx_1523_to_2047_octet_packets" },
240 { "rx_2048_to_4095_octet_packets" },
241 { "rx_4096_to_8191_octet_packets" },
242 { "rx_8192_to_9022_octet_packets" },
249 { "tx_flow_control" },
251 { "tx_single_collisions" },
252 { "tx_mult_collisions" },
254 { "tx_excessive_collisions" },
255 { "tx_late_collisions" },
256 { "tx_collide_2times" },
257 { "tx_collide_3times" },
258 { "tx_collide_4times" },
259 { "tx_collide_5times" },
260 { "tx_collide_6times" },
261 { "tx_collide_7times" },
262 { "tx_collide_8times" },
263 { "tx_collide_9times" },
264 { "tx_collide_10times" },
265 { "tx_collide_11times" },
266 { "tx_collide_12times" },
267 { "tx_collide_13times" },
268 { "tx_collide_14times" },
269 { "tx_collide_15times" },
270 { "tx_ucast_packets" },
271 { "tx_mcast_packets" },
272 { "tx_bcast_packets" },
273 { "tx_carrier_sense_errors" },
277 { "dma_writeq_full" },
278 { "dma_write_prioq_full" },
282 { "rx_threshold_hit" },
284 { "dma_readq_full" },
285 { "dma_read_prioq_full" },
286 { "tx_comp_queue_full" },
288 { "ring_set_send_prod_index" },
289 { "ring_status_update" },
291 { "nic_avoided_irqs" },
292 { "nic_tx_threshold_hit" }
295 static const struct {
296 const char string
[ETH_GSTRING_LEN
];
297 } ethtool_test_keys
[TG3_NUM_TEST
] = {
298 { "nvram test (online) " },
299 { "link test (online) " },
300 { "register test (offline)" },
301 { "memory test (offline)" },
302 { "loopback test (offline)" },
303 { "interrupt test (offline)" },
306 static void tg3_write32(struct tg3
*tp
, u32 off
, u32 val
)
308 writel(val
, tp
->regs
+ off
);
311 static u32
tg3_read32(struct tg3
*tp
, u32 off
)
313 return (readl(tp
->regs
+ off
));
316 static void tg3_write_indirect_reg32(struct tg3
*tp
, u32 off
, u32 val
)
320 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
321 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
322 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
323 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
326 static void tg3_write_flush_reg32(struct tg3
*tp
, u32 off
, u32 val
)
328 writel(val
, tp
->regs
+ off
);
329 readl(tp
->regs
+ off
);
332 static u32
tg3_read_indirect_reg32(struct tg3
*tp
, u32 off
)
337 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
338 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
);
339 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
340 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
344 static void tg3_write_indirect_mbox(struct tg3
*tp
, u32 off
, u32 val
)
348 if (off
== (MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
)) {
349 pci_write_config_dword(tp
->pdev
, TG3PCI_RCV_RET_RING_CON_IDX
+
350 TG3_64BIT_REG_LOW
, val
);
353 if (off
== (MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
)) {
354 pci_write_config_dword(tp
->pdev
, TG3PCI_STD_RING_PROD_IDX
+
355 TG3_64BIT_REG_LOW
, val
);
359 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
360 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
361 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, val
);
362 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
364 /* In indirect mode when disabling interrupts, we also need
365 * to clear the interrupt bit in the GRC local ctrl register.
367 if ((off
== (MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
)) &&
369 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_LOCAL_CTRL
,
370 tp
->grc_local_ctrl
|GRC_LCLCTRL_CLEARINT
);
374 static u32
tg3_read_indirect_mbox(struct tg3
*tp
, u32 off
)
379 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
380 pci_write_config_dword(tp
->pdev
, TG3PCI_REG_BASE_ADDR
, off
+ 0x5600);
381 pci_read_config_dword(tp
->pdev
, TG3PCI_REG_DATA
, &val
);
382 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
386 /* usec_wait specifies the wait time in usec when writing to certain registers
387 * where it is unsafe to read back the register without some delay.
388 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
389 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
391 static void _tw32_flush(struct tg3
*tp
, u32 off
, u32 val
, u32 usec_wait
)
393 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) ||
394 (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
395 /* Non-posted methods */
396 tp
->write32(tp
, off
, val
);
399 tg3_write32(tp
, off
, val
);
404 /* Wait again after the read for the posted method to guarantee that
405 * the wait time is met.
411 static inline void tw32_mailbox_flush(struct tg3
*tp
, u32 off
, u32 val
)
413 tp
->write32_mbox(tp
, off
, val
);
414 if (!(tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
) &&
415 !(tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
))
416 tp
->read32_mbox(tp
, off
);
419 static void tg3_write32_tx_mbox(struct tg3
*tp
, u32 off
, u32 val
)
421 void __iomem
*mbox
= tp
->regs
+ off
;
423 if (tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
)
425 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
429 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
430 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
431 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
432 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
433 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
435 #define tw32(reg,val) tp->write32(tp, reg, val)
436 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
437 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
438 #define tr32(reg) tp->read32(tp, reg)
440 static void tg3_write_mem(struct tg3
*tp
, u32 off
, u32 val
)
444 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
445 if (tp
->tg3_flags
& TG3_FLAG_SRAM_USE_CONFIG
) {
446 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
447 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
449 /* Always leave this as zero. */
450 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
452 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
453 tw32_f(TG3PCI_MEM_WIN_DATA
, val
);
455 /* Always leave this as zero. */
456 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
458 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
461 static void tg3_read_mem(struct tg3
*tp
, u32 off
, u32
*val
)
465 spin_lock_irqsave(&tp
->indirect_lock
, flags
);
466 if (tp
->tg3_flags
& TG3_FLAG_SRAM_USE_CONFIG
) {
467 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, off
);
468 pci_read_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
470 /* Always leave this as zero. */
471 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
473 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, off
);
474 *val
= tr32(TG3PCI_MEM_WIN_DATA
);
476 /* Always leave this as zero. */
477 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
479 spin_unlock_irqrestore(&tp
->indirect_lock
, flags
);
482 static void tg3_disable_ints(struct tg3
*tp
)
484 tw32(TG3PCI_MISC_HOST_CTRL
,
485 (tp
->misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
));
486 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
489 static inline void tg3_cond_int(struct tg3
*tp
)
491 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
492 (tp
->hw_status
->status
& SD_STATUS_UPDATED
))
493 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
| GRC_LCLCTRL_SETINT
);
496 static void tg3_enable_ints(struct tg3
*tp
)
501 tw32(TG3PCI_MISC_HOST_CTRL
,
502 (tp
->misc_host_ctrl
& ~MISC_HOST_CTRL_MASK_PCI_INT
));
503 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
504 (tp
->last_tag
<< 24));
505 if (tp
->tg3_flags2
& TG3_FLG2_1SHOT_MSI
)
506 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
507 (tp
->last_tag
<< 24));
511 static inline unsigned int tg3_has_work(struct tg3
*tp
)
513 struct tg3_hw_status
*sblk
= tp
->hw_status
;
514 unsigned int work_exists
= 0;
516 /* check for phy events */
517 if (!(tp
->tg3_flags
&
518 (TG3_FLAG_USE_LINKCHG_REG
|
519 TG3_FLAG_POLL_SERDES
))) {
520 if (sblk
->status
& SD_STATUS_LINK_CHG
)
523 /* check for RX/TX work to do */
524 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
||
525 sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
)
532 * similar to tg3_enable_ints, but it accurately determines whether there
533 * is new work pending and can return without flushing the PIO write
534 * which reenables interrupts
536 static void tg3_restart_ints(struct tg3
*tp
)
538 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
542 /* When doing tagged status, this work check is unnecessary.
543 * The last_tag we write above tells the chip which piece of
544 * work we've completed.
546 if (!(tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) &&
548 tw32(HOSTCC_MODE
, tp
->coalesce_mode
|
549 (HOSTCC_MODE_ENABLE
| HOSTCC_MODE_NOW
));
552 static inline void tg3_netif_stop(struct tg3
*tp
)
554 tp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
555 netif_poll_disable(tp
->dev
);
556 netif_tx_disable(tp
->dev
);
559 static inline void tg3_netif_start(struct tg3
*tp
)
561 netif_wake_queue(tp
->dev
);
562 /* NOTE: unconditional netif_wake_queue is only appropriate
563 * so long as all callers are assured to have free tx slots
564 * (such as after tg3_init_hw)
566 netif_poll_enable(tp
->dev
);
567 tp
->hw_status
->status
|= SD_STATUS_UPDATED
;
571 static void tg3_switch_clocks(struct tg3
*tp
)
573 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
);
576 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
579 orig_clock_ctrl
= clock_ctrl
;
580 clock_ctrl
&= (CLOCK_CTRL_FORCE_CLKRUN
|
581 CLOCK_CTRL_CLKRUN_OENABLE
|
583 tp
->pci_clock_ctrl
= clock_ctrl
;
585 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
586 if (orig_clock_ctrl
& CLOCK_CTRL_625_CORE
) {
587 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
588 clock_ctrl
| CLOCK_CTRL_625_CORE
, 40);
590 } else if ((orig_clock_ctrl
& CLOCK_CTRL_44MHZ_CORE
) != 0) {
591 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
593 (CLOCK_CTRL_44MHZ_CORE
| CLOCK_CTRL_ALTCLK
),
595 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
596 clock_ctrl
| (CLOCK_CTRL_ALTCLK
),
599 tw32_wait_f(TG3PCI_CLOCK_CTRL
, clock_ctrl
, 40);
602 #define PHY_BUSY_LOOPS 5000
604 static int tg3_readphy(struct tg3
*tp
, int reg
, u32
*val
)
610 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
612 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
618 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
619 MI_COM_PHY_ADDR_MASK
);
620 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
621 MI_COM_REG_ADDR_MASK
);
622 frame_val
|= (MI_COM_CMD_READ
| MI_COM_START
);
624 tw32_f(MAC_MI_COM
, frame_val
);
626 loops
= PHY_BUSY_LOOPS
;
629 frame_val
= tr32(MAC_MI_COM
);
631 if ((frame_val
& MI_COM_BUSY
) == 0) {
633 frame_val
= tr32(MAC_MI_COM
);
641 *val
= frame_val
& MI_COM_DATA_MASK
;
645 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
646 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
653 static int tg3_writephy(struct tg3
*tp
, int reg
, u32 val
)
659 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
661 (tp
->mi_mode
& ~MAC_MI_MODE_AUTO_POLL
));
665 frame_val
= ((PHY_ADDR
<< MI_COM_PHY_ADDR_SHIFT
) &
666 MI_COM_PHY_ADDR_MASK
);
667 frame_val
|= ((reg
<< MI_COM_REG_ADDR_SHIFT
) &
668 MI_COM_REG_ADDR_MASK
);
669 frame_val
|= (val
& MI_COM_DATA_MASK
);
670 frame_val
|= (MI_COM_CMD_WRITE
| MI_COM_START
);
672 tw32_f(MAC_MI_COM
, frame_val
);
674 loops
= PHY_BUSY_LOOPS
;
677 frame_val
= tr32(MAC_MI_COM
);
678 if ((frame_val
& MI_COM_BUSY
) == 0) {
680 frame_val
= tr32(MAC_MI_COM
);
690 if ((tp
->mi_mode
& MAC_MI_MODE_AUTO_POLL
) != 0) {
691 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
698 static void tg3_phy_set_wirespeed(struct tg3
*tp
)
702 if (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
)
705 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x7007) &&
706 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
))
707 tg3_writephy(tp
, MII_TG3_AUX_CTRL
,
708 (val
| (1 << 15) | (1 << 4)));
711 static int tg3_bmcr_reset(struct tg3
*tp
)
716 /* OK, reset it, and poll the BMCR_RESET bit until it
717 * clears or we time out.
719 phy_control
= BMCR_RESET
;
720 err
= tg3_writephy(tp
, MII_BMCR
, phy_control
);
726 err
= tg3_readphy(tp
, MII_BMCR
, &phy_control
);
730 if ((phy_control
& BMCR_RESET
) == 0) {
742 static int tg3_wait_macro_done(struct tg3
*tp
)
749 if (!tg3_readphy(tp
, 0x16, &tmp32
)) {
750 if ((tmp32
& 0x1000) == 0)
760 static int tg3_phy_write_and_check_testpat(struct tg3
*tp
, int *resetp
)
762 static const u32 test_pat
[4][6] = {
763 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
764 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
765 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
766 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
770 for (chan
= 0; chan
< 4; chan
++) {
773 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
774 (chan
* 0x2000) | 0x0200);
775 tg3_writephy(tp
, 0x16, 0x0002);
777 for (i
= 0; i
< 6; i
++)
778 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
,
781 tg3_writephy(tp
, 0x16, 0x0202);
782 if (tg3_wait_macro_done(tp
)) {
787 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
788 (chan
* 0x2000) | 0x0200);
789 tg3_writephy(tp
, 0x16, 0x0082);
790 if (tg3_wait_macro_done(tp
)) {
795 tg3_writephy(tp
, 0x16, 0x0802);
796 if (tg3_wait_macro_done(tp
)) {
801 for (i
= 0; i
< 6; i
+= 2) {
804 if (tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &low
) ||
805 tg3_readphy(tp
, MII_TG3_DSP_RW_PORT
, &high
) ||
806 tg3_wait_macro_done(tp
)) {
812 if (low
!= test_pat
[chan
][i
] ||
813 high
!= test_pat
[chan
][i
+1]) {
814 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000b);
815 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4001);
816 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x4005);
826 static int tg3_phy_reset_chanpat(struct tg3
*tp
)
830 for (chan
= 0; chan
< 4; chan
++) {
833 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
,
834 (chan
* 0x2000) | 0x0200);
835 tg3_writephy(tp
, 0x16, 0x0002);
836 for (i
= 0; i
< 6; i
++)
837 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x000);
838 tg3_writephy(tp
, 0x16, 0x0202);
839 if (tg3_wait_macro_done(tp
))
846 static int tg3_phy_reset_5703_4_5(struct tg3
*tp
)
848 u32 reg32
, phy9_orig
;
849 int retries
, do_phy_reset
, err
;
855 err
= tg3_bmcr_reset(tp
);
861 /* Disable transmitter and interrupt. */
862 if (tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
))
866 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
868 /* Set full-duplex, 1000 mbps. */
869 tg3_writephy(tp
, MII_BMCR
,
870 BMCR_FULLDPLX
| TG3_BMCR_SPEED1000
);
872 /* Set to master mode. */
873 if (tg3_readphy(tp
, MII_TG3_CTRL
, &phy9_orig
))
876 tg3_writephy(tp
, MII_TG3_CTRL
,
877 (MII_TG3_CTRL_AS_MASTER
|
878 MII_TG3_CTRL_ENABLE_AS_MASTER
));
880 /* Enable SM_DSP_CLOCK and 6dB. */
881 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
883 /* Block the PHY control access. */
884 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
885 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0800);
887 err
= tg3_phy_write_and_check_testpat(tp
, &do_phy_reset
);
892 err
= tg3_phy_reset_chanpat(tp
);
896 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8005);
897 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0000);
899 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8200);
900 tg3_writephy(tp
, 0x16, 0x0000);
902 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
903 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
904 /* Set Extended packet length bit for jumbo frames */
905 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4400);
908 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
911 tg3_writephy(tp
, MII_TG3_CTRL
, phy9_orig
);
913 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, ®32
)) {
915 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, reg32
);
922 static void tg3_link_report(struct tg3
*);
924 /* This will reset the tigon3 PHY if there is no valid
925 * link unless the FORCE argument is non-zero.
927 static int tg3_phy_reset(struct tg3
*tp
)
932 err
= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
933 err
|= tg3_readphy(tp
, MII_BMSR
, &phy_status
);
937 if (netif_running(tp
->dev
) && netif_carrier_ok(tp
->dev
)) {
938 netif_carrier_off(tp
->dev
);
942 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
943 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
944 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
945 err
= tg3_phy_reset_5703_4_5(tp
);
951 err
= tg3_bmcr_reset(tp
);
956 if (tp
->tg3_flags2
& TG3_FLG2_PHY_ADC_BUG
) {
957 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
958 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
959 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x2aaa);
960 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
961 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0323);
962 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
964 if (tp
->tg3_flags2
& TG3_FLG2_PHY_5704_A0_BUG
) {
965 tg3_writephy(tp
, 0x1c, 0x8d68);
966 tg3_writephy(tp
, 0x1c, 0x8d68);
968 if (tp
->tg3_flags2
& TG3_FLG2_PHY_BER_BUG
) {
969 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
970 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
971 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x310b);
972 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
973 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x9506);
974 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x401f);
975 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x14e2);
976 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
978 else if (tp
->tg3_flags2
& TG3_FLG2_PHY_JITTER_BUG
) {
979 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0c00);
980 tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x000a);
981 tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x010b);
982 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0400);
984 /* Set Extended packet length bit (bit 14) on all chips that */
985 /* support jumbo frames */
986 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
987 /* Cannot do read-modify-write on 5401 */
988 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
989 } else if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
992 /* Set bit 14 with read-modify-write to preserve other bits */
993 if (!tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x0007) &&
994 !tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &phy_reg
))
995 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, phy_reg
| 0x4000);
998 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
999 * jumbo frames transmission.
1001 if (tp
->tg3_flags2
& TG3_FLG2_JUMBO_CAPABLE
) {
1004 if (!tg3_readphy(tp
, MII_TG3_EXT_CTRL
, &phy_reg
))
1005 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1006 phy_reg
| MII_TG3_EXT_CTRL_FIFO_ELASTIC
);
1009 tg3_phy_set_wirespeed(tp
);
1013 static void tg3_frob_aux_power(struct tg3
*tp
)
1015 struct tg3
*tp_peer
= tp
;
1017 if ((tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) != 0)
1020 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
1021 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
1022 struct net_device
*dev_peer
;
1024 dev_peer
= pci_get_drvdata(tp
->pdev_peer
);
1025 /* remove_one() may have been run on the peer. */
1029 tp_peer
= netdev_priv(dev_peer
);
1032 if ((tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1033 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0 ||
1034 (tp_peer
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) != 0 ||
1035 (tp_peer
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
1036 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1037 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1038 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1039 (GRC_LCLCTRL_GPIO_OE0
|
1040 GRC_LCLCTRL_GPIO_OE1
|
1041 GRC_LCLCTRL_GPIO_OE2
|
1042 GRC_LCLCTRL_GPIO_OUTPUT0
|
1043 GRC_LCLCTRL_GPIO_OUTPUT1
),
1047 u32 grc_local_ctrl
= 0;
1049 if (tp_peer
!= tp
&&
1050 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1053 /* Workaround to prevent overdrawing Amps. */
1054 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
1056 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
1057 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1058 grc_local_ctrl
, 100);
1061 /* On 5753 and variants, GPIO2 cannot be used. */
1062 no_gpio2
= tp
->nic_sram_data_cfg
&
1063 NIC_SRAM_DATA_CFG_NO_GPIO2
;
1065 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE0
|
1066 GRC_LCLCTRL_GPIO_OE1
|
1067 GRC_LCLCTRL_GPIO_OE2
|
1068 GRC_LCLCTRL_GPIO_OUTPUT1
|
1069 GRC_LCLCTRL_GPIO_OUTPUT2
;
1071 grc_local_ctrl
&= ~(GRC_LCLCTRL_GPIO_OE2
|
1072 GRC_LCLCTRL_GPIO_OUTPUT2
);
1074 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1075 grc_local_ctrl
, 100);
1077 grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OUTPUT0
;
1079 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1080 grc_local_ctrl
, 100);
1083 grc_local_ctrl
&= ~GRC_LCLCTRL_GPIO_OUTPUT2
;
1084 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1085 grc_local_ctrl
, 100);
1089 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
1090 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
1091 if (tp_peer
!= tp
&&
1092 (tp_peer
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) != 0)
1095 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1096 (GRC_LCLCTRL_GPIO_OE1
|
1097 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1099 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1100 GRC_LCLCTRL_GPIO_OE1
, 100);
1102 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
|
1103 (GRC_LCLCTRL_GPIO_OE1
|
1104 GRC_LCLCTRL_GPIO_OUTPUT1
), 100);
1109 static int tg3_setup_phy(struct tg3
*, int);
1111 #define RESET_KIND_SHUTDOWN 0
1112 #define RESET_KIND_INIT 1
1113 #define RESET_KIND_SUSPEND 2
1115 static void tg3_write_sig_post_reset(struct tg3
*, int);
1116 static int tg3_halt_cpu(struct tg3
*, u32
);
1117 static int tg3_nvram_lock(struct tg3
*);
1118 static void tg3_nvram_unlock(struct tg3
*);
1120 static void tg3_power_down_phy(struct tg3
*tp
)
1122 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
1125 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, MII_TG3_EXT_CTRL_FORCE_LED_OFF
);
1126 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x01b2);
1128 /* The PHY should not be powered down on some chips because
1131 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1132 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
1133 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
&&
1134 (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)))
1136 tg3_writephy(tp
, MII_BMCR
, BMCR_PDOWN
);
1139 static int tg3_set_power_state(struct tg3
*tp
, pci_power_t state
)
1142 u16 power_control
, power_caps
;
1143 int pm
= tp
->pm_cap
;
1145 /* Make sure register accesses (indirect or otherwise)
1146 * will function correctly.
1148 pci_write_config_dword(tp
->pdev
,
1149 TG3PCI_MISC_HOST_CTRL
,
1150 tp
->misc_host_ctrl
);
1152 pci_read_config_word(tp
->pdev
,
1155 power_control
|= PCI_PM_CTRL_PME_STATUS
;
1156 power_control
&= ~(PCI_PM_CTRL_STATE_MASK
);
1160 pci_write_config_word(tp
->pdev
,
1163 udelay(100); /* Delay after power state change */
1165 /* Switch out of Vaux if it is not a LOM */
1166 if (!(tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
1167 tw32_wait_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
, 100);
1184 printk(KERN_WARNING PFX
"%s: Invalid power state (%d) "
1186 tp
->dev
->name
, state
);
1190 power_control
|= PCI_PM_CTRL_PME_ENABLE
;
1192 misc_host_ctrl
= tr32(TG3PCI_MISC_HOST_CTRL
);
1193 tw32(TG3PCI_MISC_HOST_CTRL
,
1194 misc_host_ctrl
| MISC_HOST_CTRL_MASK_PCI_INT
);
1196 if (tp
->link_config
.phy_is_low_power
== 0) {
1197 tp
->link_config
.phy_is_low_power
= 1;
1198 tp
->link_config
.orig_speed
= tp
->link_config
.speed
;
1199 tp
->link_config
.orig_duplex
= tp
->link_config
.duplex
;
1200 tp
->link_config
.orig_autoneg
= tp
->link_config
.autoneg
;
1203 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)) {
1204 tp
->link_config
.speed
= SPEED_10
;
1205 tp
->link_config
.duplex
= DUPLEX_HALF
;
1206 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
1207 tg3_setup_phy(tp
, 0);
1210 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1214 for (i
= 0; i
< 200; i
++) {
1215 tg3_read_mem(tp
, NIC_SRAM_FW_ASF_STATUS_MBOX
, &val
);
1216 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
1221 tg3_write_mem(tp
, NIC_SRAM_WOL_MBOX
, WOL_SIGNATURE
|
1222 WOL_DRV_STATE_SHUTDOWN
|
1223 WOL_DRV_WOL
| WOL_SET_MAGIC_PKT
);
1225 pci_read_config_word(tp
->pdev
, pm
+ PCI_PM_PMC
, &power_caps
);
1227 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) {
1230 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
1231 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x5a);
1234 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
1235 mac_mode
= MAC_MODE_PORT_MODE_GMII
;
1237 mac_mode
= MAC_MODE_PORT_MODE_MII
;
1239 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
||
1240 !(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
))
1241 mac_mode
|= MAC_MODE_LINK_POLARITY
;
1243 mac_mode
= MAC_MODE_PORT_MODE_TBI
;
1246 if (!(tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
1247 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
1249 if (((power_caps
& PCI_PM_CAP_PME_D3cold
) &&
1250 (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)))
1251 mac_mode
|= MAC_MODE_MAGIC_PKT_ENABLE
;
1253 tw32_f(MAC_MODE
, mac_mode
);
1256 tw32_f(MAC_RX_MODE
, RX_MODE_ENABLE
);
1260 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
) &&
1261 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1262 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)) {
1265 base_val
= tp
->pci_clock_ctrl
;
1266 base_val
|= (CLOCK_CTRL_RXCLK_DISABLE
|
1267 CLOCK_CTRL_TXCLK_DISABLE
);
1269 tw32_wait_f(TG3PCI_CLOCK_CTRL
, base_val
| CLOCK_CTRL_ALTCLK
|
1270 CLOCK_CTRL_PWRDOWN_PLL133
, 40);
1271 } else if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
1273 } else if (!((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
1274 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))) {
1275 u32 newbits1
, newbits2
;
1277 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1278 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1279 newbits1
= (CLOCK_CTRL_RXCLK_DISABLE
|
1280 CLOCK_CTRL_TXCLK_DISABLE
|
1282 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1283 } else if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
1284 newbits1
= CLOCK_CTRL_625_CORE
;
1285 newbits2
= newbits1
| CLOCK_CTRL_ALTCLK
;
1287 newbits1
= CLOCK_CTRL_ALTCLK
;
1288 newbits2
= newbits1
| CLOCK_CTRL_44MHZ_CORE
;
1291 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits1
,
1294 tw32_wait_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
| newbits2
,
1297 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
1300 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1301 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1302 newbits3
= (CLOCK_CTRL_RXCLK_DISABLE
|
1303 CLOCK_CTRL_TXCLK_DISABLE
|
1304 CLOCK_CTRL_44MHZ_CORE
);
1306 newbits3
= CLOCK_CTRL_44MHZ_CORE
;
1309 tw32_wait_f(TG3PCI_CLOCK_CTRL
,
1310 tp
->pci_clock_ctrl
| newbits3
, 40);
1314 if (!(tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
) &&
1315 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
1316 tg3_power_down_phy(tp
);
1318 tg3_frob_aux_power(tp
);
1320 /* Workaround for unstable PLL clock */
1321 if ((GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_AX
) ||
1322 (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5750_BX
)) {
1323 u32 val
= tr32(0x7d00);
1325 val
&= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1327 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
1330 err
= tg3_nvram_lock(tp
);
1331 tg3_halt_cpu(tp
, RX_CPU_BASE
);
1333 tg3_nvram_unlock(tp
);
1337 tg3_write_sig_post_reset(tp
, RESET_KIND_SHUTDOWN
);
1339 /* Finally, set the new power state. */
1340 pci_write_config_word(tp
->pdev
, pm
+ PCI_PM_CTRL
, power_control
);
1341 udelay(100); /* Delay after power state change */
1346 static void tg3_link_report(struct tg3
*tp
)
1348 if (!netif_carrier_ok(tp
->dev
)) {
1349 printk(KERN_INFO PFX
"%s: Link is down.\n", tp
->dev
->name
);
1351 printk(KERN_INFO PFX
"%s: Link is up at %d Mbps, %s duplex.\n",
1353 (tp
->link_config
.active_speed
== SPEED_1000
?
1355 (tp
->link_config
.active_speed
== SPEED_100
?
1357 (tp
->link_config
.active_duplex
== DUPLEX_FULL
?
1360 printk(KERN_INFO PFX
"%s: Flow control is %s for TX and "
1363 (tp
->tg3_flags
& TG3_FLAG_TX_PAUSE
) ? "on" : "off",
1364 (tp
->tg3_flags
& TG3_FLAG_RX_PAUSE
) ? "on" : "off");
1368 static void tg3_setup_flow_control(struct tg3
*tp
, u32 local_adv
, u32 remote_adv
)
1370 u32 new_tg3_flags
= 0;
1371 u32 old_rx_mode
= tp
->rx_mode
;
1372 u32 old_tx_mode
= tp
->tx_mode
;
1374 if (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) {
1376 /* Convert 1000BaseX flow control bits to 1000BaseT
1377 * bits before resolving flow control.
1379 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
1380 local_adv
&= ~(ADVERTISE_PAUSE_CAP
|
1381 ADVERTISE_PAUSE_ASYM
);
1382 remote_adv
&= ~(LPA_PAUSE_CAP
| LPA_PAUSE_ASYM
);
1384 if (local_adv
& ADVERTISE_1000XPAUSE
)
1385 local_adv
|= ADVERTISE_PAUSE_CAP
;
1386 if (local_adv
& ADVERTISE_1000XPSE_ASYM
)
1387 local_adv
|= ADVERTISE_PAUSE_ASYM
;
1388 if (remote_adv
& LPA_1000XPAUSE
)
1389 remote_adv
|= LPA_PAUSE_CAP
;
1390 if (remote_adv
& LPA_1000XPAUSE_ASYM
)
1391 remote_adv
|= LPA_PAUSE_ASYM
;
1394 if (local_adv
& ADVERTISE_PAUSE_CAP
) {
1395 if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1396 if (remote_adv
& LPA_PAUSE_CAP
)
1398 (TG3_FLAG_RX_PAUSE
|
1400 else if (remote_adv
& LPA_PAUSE_ASYM
)
1402 (TG3_FLAG_RX_PAUSE
);
1404 if (remote_adv
& LPA_PAUSE_CAP
)
1406 (TG3_FLAG_RX_PAUSE
|
1409 } else if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
1410 if ((remote_adv
& LPA_PAUSE_CAP
) &&
1411 (remote_adv
& LPA_PAUSE_ASYM
))
1412 new_tg3_flags
|= TG3_FLAG_TX_PAUSE
;
1415 tp
->tg3_flags
&= ~(TG3_FLAG_RX_PAUSE
| TG3_FLAG_TX_PAUSE
);
1416 tp
->tg3_flags
|= new_tg3_flags
;
1418 new_tg3_flags
= tp
->tg3_flags
;
1421 if (new_tg3_flags
& TG3_FLAG_RX_PAUSE
)
1422 tp
->rx_mode
|= RX_MODE_FLOW_CTRL_ENABLE
;
1424 tp
->rx_mode
&= ~RX_MODE_FLOW_CTRL_ENABLE
;
1426 if (old_rx_mode
!= tp
->rx_mode
) {
1427 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
1430 if (new_tg3_flags
& TG3_FLAG_TX_PAUSE
)
1431 tp
->tx_mode
|= TX_MODE_FLOW_CTRL_ENABLE
;
1433 tp
->tx_mode
&= ~TX_MODE_FLOW_CTRL_ENABLE
;
1435 if (old_tx_mode
!= tp
->tx_mode
) {
1436 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
1440 static void tg3_aux_stat_to_speed_duplex(struct tg3
*tp
, u32 val
, u16
*speed
, u8
*duplex
)
1442 switch (val
& MII_TG3_AUX_STAT_SPDMASK
) {
1443 case MII_TG3_AUX_STAT_10HALF
:
1445 *duplex
= DUPLEX_HALF
;
1448 case MII_TG3_AUX_STAT_10FULL
:
1450 *duplex
= DUPLEX_FULL
;
1453 case MII_TG3_AUX_STAT_100HALF
:
1455 *duplex
= DUPLEX_HALF
;
1458 case MII_TG3_AUX_STAT_100FULL
:
1460 *duplex
= DUPLEX_FULL
;
1463 case MII_TG3_AUX_STAT_1000HALF
:
1464 *speed
= SPEED_1000
;
1465 *duplex
= DUPLEX_HALF
;
1468 case MII_TG3_AUX_STAT_1000FULL
:
1469 *speed
= SPEED_1000
;
1470 *duplex
= DUPLEX_FULL
;
1474 *speed
= SPEED_INVALID
;
1475 *duplex
= DUPLEX_INVALID
;
1480 static void tg3_phy_copper_begin(struct tg3
*tp
)
1485 if (tp
->link_config
.phy_is_low_power
) {
1486 /* Entering low power mode. Disable gigabit and
1487 * 100baseT advertisements.
1489 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1491 new_adv
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1492 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1493 if (tp
->tg3_flags
& TG3_FLAG_WOL_SPEED_100MB
)
1494 new_adv
|= (ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1496 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1497 } else if (tp
->link_config
.speed
== SPEED_INVALID
) {
1498 tp
->link_config
.advertising
=
1499 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
1500 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
1501 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
1502 ADVERTISED_Autoneg
| ADVERTISED_MII
);
1504 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
1505 tp
->link_config
.advertising
&=
1506 ~(ADVERTISED_1000baseT_Half
|
1507 ADVERTISED_1000baseT_Full
);
1509 new_adv
= (ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
1510 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Half
)
1511 new_adv
|= ADVERTISE_10HALF
;
1512 if (tp
->link_config
.advertising
& ADVERTISED_10baseT_Full
)
1513 new_adv
|= ADVERTISE_10FULL
;
1514 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Half
)
1515 new_adv
|= ADVERTISE_100HALF
;
1516 if (tp
->link_config
.advertising
& ADVERTISED_100baseT_Full
)
1517 new_adv
|= ADVERTISE_100FULL
;
1518 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1520 if (tp
->link_config
.advertising
&
1521 (ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
)) {
1523 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
1524 new_adv
|= MII_TG3_CTRL_ADV_1000_HALF
;
1525 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
1526 new_adv
|= MII_TG3_CTRL_ADV_1000_FULL
;
1527 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) &&
1528 (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1529 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
))
1530 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1531 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1532 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1534 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1537 /* Asking for a specific link mode. */
1538 if (tp
->link_config
.speed
== SPEED_1000
) {
1539 new_adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1540 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1542 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1543 new_adv
= MII_TG3_CTRL_ADV_1000_FULL
;
1545 new_adv
= MII_TG3_CTRL_ADV_1000_HALF
;
1546 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1547 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
1548 new_adv
|= (MII_TG3_CTRL_AS_MASTER
|
1549 MII_TG3_CTRL_ENABLE_AS_MASTER
);
1550 tg3_writephy(tp
, MII_TG3_CTRL
, new_adv
);
1552 tg3_writephy(tp
, MII_TG3_CTRL
, 0);
1554 new_adv
= ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
;
1555 if (tp
->link_config
.speed
== SPEED_100
) {
1556 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1557 new_adv
|= ADVERTISE_100FULL
;
1559 new_adv
|= ADVERTISE_100HALF
;
1561 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1562 new_adv
|= ADVERTISE_10FULL
;
1564 new_adv
|= ADVERTISE_10HALF
;
1566 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
1570 if (tp
->link_config
.autoneg
== AUTONEG_DISABLE
&&
1571 tp
->link_config
.speed
!= SPEED_INVALID
) {
1572 u32 bmcr
, orig_bmcr
;
1574 tp
->link_config
.active_speed
= tp
->link_config
.speed
;
1575 tp
->link_config
.active_duplex
= tp
->link_config
.duplex
;
1578 switch (tp
->link_config
.speed
) {
1584 bmcr
|= BMCR_SPEED100
;
1588 bmcr
|= TG3_BMCR_SPEED1000
;
1592 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
1593 bmcr
|= BMCR_FULLDPLX
;
1595 if (!tg3_readphy(tp
, MII_BMCR
, &orig_bmcr
) &&
1596 (bmcr
!= orig_bmcr
)) {
1597 tg3_writephy(tp
, MII_BMCR
, BMCR_LOOPBACK
);
1598 for (i
= 0; i
< 1500; i
++) {
1602 if (tg3_readphy(tp
, MII_BMSR
, &tmp
) ||
1603 tg3_readphy(tp
, MII_BMSR
, &tmp
))
1605 if (!(tmp
& BMSR_LSTATUS
)) {
1610 tg3_writephy(tp
, MII_BMCR
, bmcr
);
1614 tg3_writephy(tp
, MII_BMCR
,
1615 BMCR_ANENABLE
| BMCR_ANRESTART
);
1619 static int tg3_init_5401phy_dsp(struct tg3
*tp
)
1623 /* Turn off tap power management. */
1624 /* Set Extended packet length bit */
1625 err
= tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4c20);
1627 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0012);
1628 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1804);
1630 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x0013);
1631 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x1204);
1633 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
1634 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0132);
1636 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x8006);
1637 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0232);
1639 err
|= tg3_writephy(tp
, MII_TG3_DSP_ADDRESS
, 0x201f);
1640 err
|= tg3_writephy(tp
, MII_TG3_DSP_RW_PORT
, 0x0a20);
1647 static int tg3_copper_is_advertising_all(struct tg3
*tp
)
1649 u32 adv_reg
, all_mask
;
1651 if (tg3_readphy(tp
, MII_ADVERTISE
, &adv_reg
))
1654 all_mask
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
1655 ADVERTISE_100HALF
| ADVERTISE_100FULL
);
1656 if ((adv_reg
& all_mask
) != all_mask
)
1658 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
1661 if (tg3_readphy(tp
, MII_TG3_CTRL
, &tg3_ctrl
))
1664 all_mask
= (MII_TG3_CTRL_ADV_1000_HALF
|
1665 MII_TG3_CTRL_ADV_1000_FULL
);
1666 if ((tg3_ctrl
& all_mask
) != all_mask
)
1672 static int tg3_setup_copper_phy(struct tg3
*tp
, int force_reset
)
1674 int current_link_up
;
1683 (MAC_STATUS_SYNC_CHANGED
|
1684 MAC_STATUS_CFG_CHANGED
|
1685 MAC_STATUS_MI_COMPLETION
|
1686 MAC_STATUS_LNKSTATE_CHANGED
));
1689 tp
->mi_mode
= MAC_MI_MODE_BASE
;
1690 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1693 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x02);
1695 /* Some third-party PHYs need to be reset on link going
1698 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
1699 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
||
1700 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
1701 netif_carrier_ok(tp
->dev
)) {
1702 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1703 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1704 !(bmsr
& BMSR_LSTATUS
))
1710 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
1711 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1712 if (tg3_readphy(tp
, MII_BMSR
, &bmsr
) ||
1713 !(tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
))
1716 if (!(bmsr
& BMSR_LSTATUS
)) {
1717 err
= tg3_init_5401phy_dsp(tp
);
1721 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1722 for (i
= 0; i
< 1000; i
++) {
1724 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1725 (bmsr
& BMSR_LSTATUS
)) {
1731 if ((tp
->phy_id
& PHY_ID_REV_MASK
) == PHY_REV_BCM5401_B0
&&
1732 !(bmsr
& BMSR_LSTATUS
) &&
1733 tp
->link_config
.active_speed
== SPEED_1000
) {
1734 err
= tg3_phy_reset(tp
);
1736 err
= tg3_init_5401phy_dsp(tp
);
1741 } else if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
1742 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
) {
1743 /* 5701 {A0,B0} CRC bug workaround */
1744 tg3_writephy(tp
, 0x15, 0x0a75);
1745 tg3_writephy(tp
, 0x1c, 0x8c68);
1746 tg3_writephy(tp
, 0x1c, 0x8d68);
1747 tg3_writephy(tp
, 0x1c, 0x8c68);
1750 /* Clear pending interrupts... */
1751 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
1752 tg3_readphy(tp
, MII_TG3_ISTAT
, &dummy
);
1754 if (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
)
1755 tg3_writephy(tp
, MII_TG3_IMASK
, ~MII_TG3_INT_LINKCHG
);
1757 tg3_writephy(tp
, MII_TG3_IMASK
, ~0);
1759 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
1760 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
1761 if (tp
->led_ctrl
== LED_CTRL_MODE_PHY_1
)
1762 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
1763 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
1765 tg3_writephy(tp
, MII_TG3_EXT_CTRL
, 0);
1768 current_link_up
= 0;
1769 current_speed
= SPEED_INVALID
;
1770 current_duplex
= DUPLEX_INVALID
;
1772 if (tp
->tg3_flags2
& TG3_FLG2_CAPACITIVE_COUPLING
) {
1775 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, 0x4007);
1776 tg3_readphy(tp
, MII_TG3_AUX_CTRL
, &val
);
1777 if (!(val
& (1 << 10))) {
1779 tg3_writephy(tp
, MII_TG3_AUX_CTRL
, val
);
1785 for (i
= 0; i
< 100; i
++) {
1786 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
1787 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
1788 (bmsr
& BMSR_LSTATUS
))
1793 if (bmsr
& BMSR_LSTATUS
) {
1796 tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
);
1797 for (i
= 0; i
< 2000; i
++) {
1799 if (!tg3_readphy(tp
, MII_TG3_AUX_STAT
, &aux_stat
) &&
1804 tg3_aux_stat_to_speed_duplex(tp
, aux_stat
,
1809 for (i
= 0; i
< 200; i
++) {
1810 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
1811 if (tg3_readphy(tp
, MII_BMCR
, &bmcr
))
1813 if (bmcr
&& bmcr
!= 0x7fff)
1818 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
1819 if (bmcr
& BMCR_ANENABLE
) {
1820 current_link_up
= 1;
1822 /* Force autoneg restart if we are exiting
1825 if (!tg3_copper_is_advertising_all(tp
))
1826 current_link_up
= 0;
1828 current_link_up
= 0;
1831 if (!(bmcr
& BMCR_ANENABLE
) &&
1832 tp
->link_config
.speed
== current_speed
&&
1833 tp
->link_config
.duplex
== current_duplex
) {
1834 current_link_up
= 1;
1836 current_link_up
= 0;
1840 tp
->link_config
.active_speed
= current_speed
;
1841 tp
->link_config
.active_duplex
= current_duplex
;
1844 if (current_link_up
== 1 &&
1845 (tp
->link_config
.active_duplex
== DUPLEX_FULL
) &&
1846 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
1847 u32 local_adv
, remote_adv
;
1849 if (tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
))
1851 local_adv
&= (ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
);
1853 if (tg3_readphy(tp
, MII_LPA
, &remote_adv
))
1856 remote_adv
&= (LPA_PAUSE_CAP
| LPA_PAUSE_ASYM
);
1858 /* If we are not advertising full pause capability,
1859 * something is wrong. Bring the link down and reconfigure.
1861 if (local_adv
!= ADVERTISE_PAUSE_CAP
) {
1862 current_link_up
= 0;
1864 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
1868 if (current_link_up
== 0 || tp
->link_config
.phy_is_low_power
) {
1871 tg3_phy_copper_begin(tp
);
1873 tg3_readphy(tp
, MII_BMSR
, &tmp
);
1874 if (!tg3_readphy(tp
, MII_BMSR
, &tmp
) &&
1875 (tmp
& BMSR_LSTATUS
))
1876 current_link_up
= 1;
1879 tp
->mac_mode
&= ~MAC_MODE_PORT_MODE_MASK
;
1880 if (current_link_up
== 1) {
1881 if (tp
->link_config
.active_speed
== SPEED_100
||
1882 tp
->link_config
.active_speed
== SPEED_10
)
1883 tp
->mac_mode
|= MAC_MODE_PORT_MODE_MII
;
1885 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1887 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
1889 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
1890 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
1891 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
1893 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
1894 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) {
1895 if ((tp
->led_ctrl
== LED_CTRL_MODE_PHY_2
) ||
1896 (current_link_up
== 1 &&
1897 tp
->link_config
.active_speed
== SPEED_10
))
1898 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
1900 if (current_link_up
== 1)
1901 tp
->mac_mode
|= MAC_MODE_LINK_POLARITY
;
1904 /* ??? Without this setting Netgear GA302T PHY does not
1905 * ??? send/receive packets...
1907 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5411
&&
1908 tp
->pci_chip_rev_id
== CHIPREV_ID_5700_ALTIMA
) {
1909 tp
->mi_mode
|= MAC_MI_MODE_AUTO_POLL
;
1910 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
1914 tw32_f(MAC_MODE
, tp
->mac_mode
);
1917 if (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) {
1918 /* Polled via timer. */
1919 tw32_f(MAC_EVENT
, 0);
1921 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
1925 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
&&
1926 current_link_up
== 1 &&
1927 tp
->link_config
.active_speed
== SPEED_1000
&&
1928 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) ||
1929 (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
))) {
1932 (MAC_STATUS_SYNC_CHANGED
|
1933 MAC_STATUS_CFG_CHANGED
));
1936 NIC_SRAM_FIRMWARE_MBOX
,
1937 NIC_SRAM_FIRMWARE_MBOX_MAGIC2
);
1940 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
1941 if (current_link_up
)
1942 netif_carrier_on(tp
->dev
);
1944 netif_carrier_off(tp
->dev
);
1945 tg3_link_report(tp
);
1951 struct tg3_fiber_aneginfo
{
1953 #define ANEG_STATE_UNKNOWN 0
1954 #define ANEG_STATE_AN_ENABLE 1
1955 #define ANEG_STATE_RESTART_INIT 2
1956 #define ANEG_STATE_RESTART 3
1957 #define ANEG_STATE_DISABLE_LINK_OK 4
1958 #define ANEG_STATE_ABILITY_DETECT_INIT 5
1959 #define ANEG_STATE_ABILITY_DETECT 6
1960 #define ANEG_STATE_ACK_DETECT_INIT 7
1961 #define ANEG_STATE_ACK_DETECT 8
1962 #define ANEG_STATE_COMPLETE_ACK_INIT 9
1963 #define ANEG_STATE_COMPLETE_ACK 10
1964 #define ANEG_STATE_IDLE_DETECT_INIT 11
1965 #define ANEG_STATE_IDLE_DETECT 12
1966 #define ANEG_STATE_LINK_OK 13
1967 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
1968 #define ANEG_STATE_NEXT_PAGE_WAIT 15
1971 #define MR_AN_ENABLE 0x00000001
1972 #define MR_RESTART_AN 0x00000002
1973 #define MR_AN_COMPLETE 0x00000004
1974 #define MR_PAGE_RX 0x00000008
1975 #define MR_NP_LOADED 0x00000010
1976 #define MR_TOGGLE_TX 0x00000020
1977 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
1978 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
1979 #define MR_LP_ADV_SYM_PAUSE 0x00000100
1980 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
1981 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1982 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1983 #define MR_LP_ADV_NEXT_PAGE 0x00001000
1984 #define MR_TOGGLE_RX 0x00002000
1985 #define MR_NP_RX 0x00004000
1987 #define MR_LINK_OK 0x80000000
1989 unsigned long link_time
, cur_time
;
1991 u32 ability_match_cfg
;
1992 int ability_match_count
;
1994 char ability_match
, idle_match
, ack_match
;
1996 u32 txconfig
, rxconfig
;
1997 #define ANEG_CFG_NP 0x00000080
1998 #define ANEG_CFG_ACK 0x00000040
1999 #define ANEG_CFG_RF2 0x00000020
2000 #define ANEG_CFG_RF1 0x00000010
2001 #define ANEG_CFG_PS2 0x00000001
2002 #define ANEG_CFG_PS1 0x00008000
2003 #define ANEG_CFG_HD 0x00004000
2004 #define ANEG_CFG_FD 0x00002000
2005 #define ANEG_CFG_INVAL 0x00001f06
2010 #define ANEG_TIMER_ENAB 2
2011 #define ANEG_FAILED -1
2013 #define ANEG_STATE_SETTLE_TIME 10000
2015 static int tg3_fiber_aneg_smachine(struct tg3
*tp
,
2016 struct tg3_fiber_aneginfo
*ap
)
2018 unsigned long delta
;
2022 if (ap
->state
== ANEG_STATE_UNKNOWN
) {
2026 ap
->ability_match_cfg
= 0;
2027 ap
->ability_match_count
= 0;
2028 ap
->ability_match
= 0;
2034 if (tr32(MAC_STATUS
) & MAC_STATUS_RCVD_CFG
) {
2035 rx_cfg_reg
= tr32(MAC_RX_AUTO_NEG
);
2037 if (rx_cfg_reg
!= ap
->ability_match_cfg
) {
2038 ap
->ability_match_cfg
= rx_cfg_reg
;
2039 ap
->ability_match
= 0;
2040 ap
->ability_match_count
= 0;
2042 if (++ap
->ability_match_count
> 1) {
2043 ap
->ability_match
= 1;
2044 ap
->ability_match_cfg
= rx_cfg_reg
;
2047 if (rx_cfg_reg
& ANEG_CFG_ACK
)
2055 ap
->ability_match_cfg
= 0;
2056 ap
->ability_match_count
= 0;
2057 ap
->ability_match
= 0;
2063 ap
->rxconfig
= rx_cfg_reg
;
2067 case ANEG_STATE_UNKNOWN
:
2068 if (ap
->flags
& (MR_AN_ENABLE
| MR_RESTART_AN
))
2069 ap
->state
= ANEG_STATE_AN_ENABLE
;
2072 case ANEG_STATE_AN_ENABLE
:
2073 ap
->flags
&= ~(MR_AN_COMPLETE
| MR_PAGE_RX
);
2074 if (ap
->flags
& MR_AN_ENABLE
) {
2077 ap
->ability_match_cfg
= 0;
2078 ap
->ability_match_count
= 0;
2079 ap
->ability_match
= 0;
2083 ap
->state
= ANEG_STATE_RESTART_INIT
;
2085 ap
->state
= ANEG_STATE_DISABLE_LINK_OK
;
2089 case ANEG_STATE_RESTART_INIT
:
2090 ap
->link_time
= ap
->cur_time
;
2091 ap
->flags
&= ~(MR_NP_LOADED
);
2093 tw32(MAC_TX_AUTO_NEG
, 0);
2094 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2095 tw32_f(MAC_MODE
, tp
->mac_mode
);
2098 ret
= ANEG_TIMER_ENAB
;
2099 ap
->state
= ANEG_STATE_RESTART
;
2102 case ANEG_STATE_RESTART
:
2103 delta
= ap
->cur_time
- ap
->link_time
;
2104 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2105 ap
->state
= ANEG_STATE_ABILITY_DETECT_INIT
;
2107 ret
= ANEG_TIMER_ENAB
;
2111 case ANEG_STATE_DISABLE_LINK_OK
:
2115 case ANEG_STATE_ABILITY_DETECT_INIT
:
2116 ap
->flags
&= ~(MR_TOGGLE_TX
);
2117 ap
->txconfig
= (ANEG_CFG_FD
| ANEG_CFG_PS1
);
2118 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2119 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2120 tw32_f(MAC_MODE
, tp
->mac_mode
);
2123 ap
->state
= ANEG_STATE_ABILITY_DETECT
;
2126 case ANEG_STATE_ABILITY_DETECT
:
2127 if (ap
->ability_match
!= 0 && ap
->rxconfig
!= 0) {
2128 ap
->state
= ANEG_STATE_ACK_DETECT_INIT
;
2132 case ANEG_STATE_ACK_DETECT_INIT
:
2133 ap
->txconfig
|= ANEG_CFG_ACK
;
2134 tw32(MAC_TX_AUTO_NEG
, ap
->txconfig
);
2135 tp
->mac_mode
|= MAC_MODE_SEND_CONFIGS
;
2136 tw32_f(MAC_MODE
, tp
->mac_mode
);
2139 ap
->state
= ANEG_STATE_ACK_DETECT
;
2142 case ANEG_STATE_ACK_DETECT
:
2143 if (ap
->ack_match
!= 0) {
2144 if ((ap
->rxconfig
& ~ANEG_CFG_ACK
) ==
2145 (ap
->ability_match_cfg
& ~ANEG_CFG_ACK
)) {
2146 ap
->state
= ANEG_STATE_COMPLETE_ACK_INIT
;
2148 ap
->state
= ANEG_STATE_AN_ENABLE
;
2150 } else if (ap
->ability_match
!= 0 &&
2151 ap
->rxconfig
== 0) {
2152 ap
->state
= ANEG_STATE_AN_ENABLE
;
2156 case ANEG_STATE_COMPLETE_ACK_INIT
:
2157 if (ap
->rxconfig
& ANEG_CFG_INVAL
) {
2161 ap
->flags
&= ~(MR_LP_ADV_FULL_DUPLEX
|
2162 MR_LP_ADV_HALF_DUPLEX
|
2163 MR_LP_ADV_SYM_PAUSE
|
2164 MR_LP_ADV_ASYM_PAUSE
|
2165 MR_LP_ADV_REMOTE_FAULT1
|
2166 MR_LP_ADV_REMOTE_FAULT2
|
2167 MR_LP_ADV_NEXT_PAGE
|
2170 if (ap
->rxconfig
& ANEG_CFG_FD
)
2171 ap
->flags
|= MR_LP_ADV_FULL_DUPLEX
;
2172 if (ap
->rxconfig
& ANEG_CFG_HD
)
2173 ap
->flags
|= MR_LP_ADV_HALF_DUPLEX
;
2174 if (ap
->rxconfig
& ANEG_CFG_PS1
)
2175 ap
->flags
|= MR_LP_ADV_SYM_PAUSE
;
2176 if (ap
->rxconfig
& ANEG_CFG_PS2
)
2177 ap
->flags
|= MR_LP_ADV_ASYM_PAUSE
;
2178 if (ap
->rxconfig
& ANEG_CFG_RF1
)
2179 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT1
;
2180 if (ap
->rxconfig
& ANEG_CFG_RF2
)
2181 ap
->flags
|= MR_LP_ADV_REMOTE_FAULT2
;
2182 if (ap
->rxconfig
& ANEG_CFG_NP
)
2183 ap
->flags
|= MR_LP_ADV_NEXT_PAGE
;
2185 ap
->link_time
= ap
->cur_time
;
2187 ap
->flags
^= (MR_TOGGLE_TX
);
2188 if (ap
->rxconfig
& 0x0008)
2189 ap
->flags
|= MR_TOGGLE_RX
;
2190 if (ap
->rxconfig
& ANEG_CFG_NP
)
2191 ap
->flags
|= MR_NP_RX
;
2192 ap
->flags
|= MR_PAGE_RX
;
2194 ap
->state
= ANEG_STATE_COMPLETE_ACK
;
2195 ret
= ANEG_TIMER_ENAB
;
2198 case ANEG_STATE_COMPLETE_ACK
:
2199 if (ap
->ability_match
!= 0 &&
2200 ap
->rxconfig
== 0) {
2201 ap
->state
= ANEG_STATE_AN_ENABLE
;
2204 delta
= ap
->cur_time
- ap
->link_time
;
2205 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2206 if (!(ap
->flags
& (MR_LP_ADV_NEXT_PAGE
))) {
2207 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2209 if ((ap
->txconfig
& ANEG_CFG_NP
) == 0 &&
2210 !(ap
->flags
& MR_NP_RX
)) {
2211 ap
->state
= ANEG_STATE_IDLE_DETECT_INIT
;
2219 case ANEG_STATE_IDLE_DETECT_INIT
:
2220 ap
->link_time
= ap
->cur_time
;
2221 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2222 tw32_f(MAC_MODE
, tp
->mac_mode
);
2225 ap
->state
= ANEG_STATE_IDLE_DETECT
;
2226 ret
= ANEG_TIMER_ENAB
;
2229 case ANEG_STATE_IDLE_DETECT
:
2230 if (ap
->ability_match
!= 0 &&
2231 ap
->rxconfig
== 0) {
2232 ap
->state
= ANEG_STATE_AN_ENABLE
;
2235 delta
= ap
->cur_time
- ap
->link_time
;
2236 if (delta
> ANEG_STATE_SETTLE_TIME
) {
2237 /* XXX another gem from the Broadcom driver :( */
2238 ap
->state
= ANEG_STATE_LINK_OK
;
2242 case ANEG_STATE_LINK_OK
:
2243 ap
->flags
|= (MR_AN_COMPLETE
| MR_LINK_OK
);
2247 case ANEG_STATE_NEXT_PAGE_WAIT_INIT
:
2248 /* ??? unimplemented */
2251 case ANEG_STATE_NEXT_PAGE_WAIT
:
2252 /* ??? unimplemented */
2263 static int fiber_autoneg(struct tg3
*tp
, u32
*flags
)
2266 struct tg3_fiber_aneginfo aninfo
;
2267 int status
= ANEG_FAILED
;
2271 tw32_f(MAC_TX_AUTO_NEG
, 0);
2273 tmp
= tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
;
2274 tw32_f(MAC_MODE
, tmp
| MAC_MODE_PORT_MODE_GMII
);
2277 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
);
2280 memset(&aninfo
, 0, sizeof(aninfo
));
2281 aninfo
.flags
|= MR_AN_ENABLE
;
2282 aninfo
.state
= ANEG_STATE_UNKNOWN
;
2283 aninfo
.cur_time
= 0;
2285 while (++tick
< 195000) {
2286 status
= tg3_fiber_aneg_smachine(tp
, &aninfo
);
2287 if (status
== ANEG_DONE
|| status
== ANEG_FAILED
)
2293 tp
->mac_mode
&= ~MAC_MODE_SEND_CONFIGS
;
2294 tw32_f(MAC_MODE
, tp
->mac_mode
);
2297 *flags
= aninfo
.flags
;
2299 if (status
== ANEG_DONE
&&
2300 (aninfo
.flags
& (MR_AN_COMPLETE
| MR_LINK_OK
|
2301 MR_LP_ADV_FULL_DUPLEX
)))
2307 static void tg3_init_bcm8002(struct tg3
*tp
)
2309 u32 mac_status
= tr32(MAC_STATUS
);
2312 /* Reset when initting first time or we have a link. */
2313 if ((tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) &&
2314 !(mac_status
& MAC_STATUS_PCS_SYNCED
))
2317 /* Set PLL lock range. */
2318 tg3_writephy(tp
, 0x16, 0x8007);
2321 tg3_writephy(tp
, MII_BMCR
, BMCR_RESET
);
2323 /* Wait for reset to complete. */
2324 /* XXX schedule_timeout() ... */
2325 for (i
= 0; i
< 500; i
++)
2328 /* Config mode; select PMA/Ch 1 regs. */
2329 tg3_writephy(tp
, 0x10, 0x8411);
2331 /* Enable auto-lock and comdet, select txclk for tx. */
2332 tg3_writephy(tp
, 0x11, 0x0a10);
2334 tg3_writephy(tp
, 0x18, 0x00a0);
2335 tg3_writephy(tp
, 0x16, 0x41ff);
2337 /* Assert and deassert POR. */
2338 tg3_writephy(tp
, 0x13, 0x0400);
2340 tg3_writephy(tp
, 0x13, 0x0000);
2342 tg3_writephy(tp
, 0x11, 0x0a50);
2344 tg3_writephy(tp
, 0x11, 0x0a10);
2346 /* Wait for signal to stabilize */
2347 /* XXX schedule_timeout() ... */
2348 for (i
= 0; i
< 15000; i
++)
2351 /* Deselect the channel register so we can read the PHYID
2354 tg3_writephy(tp
, 0x10, 0x8011);
2357 static int tg3_setup_fiber_hw_autoneg(struct tg3
*tp
, u32 mac_status
)
2359 u32 sg_dig_ctrl
, sg_dig_status
;
2360 u32 serdes_cfg
, expected_sg_dig_ctrl
;
2361 int workaround
, port_a
;
2362 int current_link_up
;
2365 expected_sg_dig_ctrl
= 0;
2368 current_link_up
= 0;
2370 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A0
&&
2371 tp
->pci_chip_rev_id
!= CHIPREV_ID_5704_A1
) {
2373 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
2376 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2377 /* preserve bits 20-23 for voltage regulator */
2378 serdes_cfg
= tr32(MAC_SERDES_CFG
) & 0x00f06fff;
2381 sg_dig_ctrl
= tr32(SG_DIG_CTRL
);
2383 if (tp
->link_config
.autoneg
!= AUTONEG_ENABLE
) {
2384 if (sg_dig_ctrl
& (1 << 31)) {
2386 u32 val
= serdes_cfg
;
2392 tw32_f(MAC_SERDES_CFG
, val
);
2394 tw32_f(SG_DIG_CTRL
, 0x01388400);
2396 if (mac_status
& MAC_STATUS_PCS_SYNCED
) {
2397 tg3_setup_flow_control(tp
, 0, 0);
2398 current_link_up
= 1;
2403 /* Want auto-negotiation. */
2404 expected_sg_dig_ctrl
= 0x81388400;
2406 /* Pause capability */
2407 expected_sg_dig_ctrl
|= (1 << 11);
2409 /* Asymettric pause */
2410 expected_sg_dig_ctrl
|= (1 << 12);
2412 if (sg_dig_ctrl
!= expected_sg_dig_ctrl
) {
2413 if ((tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
) &&
2414 tp
->serdes_counter
&&
2415 ((mac_status
& (MAC_STATUS_PCS_SYNCED
|
2416 MAC_STATUS_RCVD_CFG
)) ==
2417 MAC_STATUS_PCS_SYNCED
)) {
2418 tp
->serdes_counter
--;
2419 current_link_up
= 1;
2424 tw32_f(MAC_SERDES_CFG
, serdes_cfg
| 0xc011000);
2425 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
| (1 << 30));
2427 tw32_f(SG_DIG_CTRL
, expected_sg_dig_ctrl
);
2429 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
2430 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2431 } else if (mac_status
& (MAC_STATUS_PCS_SYNCED
|
2432 MAC_STATUS_SIGNAL_DET
)) {
2433 sg_dig_status
= tr32(SG_DIG_STATUS
);
2434 mac_status
= tr32(MAC_STATUS
);
2436 if ((sg_dig_status
& (1 << 1)) &&
2437 (mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2438 u32 local_adv
, remote_adv
;
2440 local_adv
= ADVERTISE_PAUSE_CAP
;
2442 if (sg_dig_status
& (1 << 19))
2443 remote_adv
|= LPA_PAUSE_CAP
;
2444 if (sg_dig_status
& (1 << 20))
2445 remote_adv
|= LPA_PAUSE_ASYM
;
2447 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2448 current_link_up
= 1;
2449 tp
->serdes_counter
= 0;
2450 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2451 } else if (!(sg_dig_status
& (1 << 1))) {
2452 if (tp
->serdes_counter
)
2453 tp
->serdes_counter
--;
2456 u32 val
= serdes_cfg
;
2463 tw32_f(MAC_SERDES_CFG
, val
);
2466 tw32_f(SG_DIG_CTRL
, 0x01388400);
2469 /* Link parallel detection - link is up */
2470 /* only if we have PCS_SYNC and not */
2471 /* receiving config code words */
2472 mac_status
= tr32(MAC_STATUS
);
2473 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2474 !(mac_status
& MAC_STATUS_RCVD_CFG
)) {
2475 tg3_setup_flow_control(tp
, 0, 0);
2476 current_link_up
= 1;
2478 TG3_FLG2_PARALLEL_DETECT
;
2479 tp
->serdes_counter
=
2480 SERDES_PARALLEL_DET_TIMEOUT
;
2482 goto restart_autoneg
;
2486 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5704S
;
2487 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2491 return current_link_up
;
2494 static int tg3_setup_fiber_by_hand(struct tg3
*tp
, u32 mac_status
)
2496 int current_link_up
= 0;
2498 if (!(mac_status
& MAC_STATUS_PCS_SYNCED
)) {
2499 tp
->tg3_flags
&= ~TG3_FLAG_GOT_SERDES_FLOWCTL
;
2503 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2507 if (fiber_autoneg(tp
, &flags
)) {
2508 u32 local_adv
, remote_adv
;
2510 local_adv
= ADVERTISE_PAUSE_CAP
;
2512 if (flags
& MR_LP_ADV_SYM_PAUSE
)
2513 remote_adv
|= LPA_PAUSE_CAP
;
2514 if (flags
& MR_LP_ADV_ASYM_PAUSE
)
2515 remote_adv
|= LPA_PAUSE_ASYM
;
2517 tg3_setup_flow_control(tp
, local_adv
, remote_adv
);
2519 tp
->tg3_flags
|= TG3_FLAG_GOT_SERDES_FLOWCTL
;
2520 current_link_up
= 1;
2522 for (i
= 0; i
< 30; i
++) {
2525 (MAC_STATUS_SYNC_CHANGED
|
2526 MAC_STATUS_CFG_CHANGED
));
2528 if ((tr32(MAC_STATUS
) &
2529 (MAC_STATUS_SYNC_CHANGED
|
2530 MAC_STATUS_CFG_CHANGED
)) == 0)
2534 mac_status
= tr32(MAC_STATUS
);
2535 if (current_link_up
== 0 &&
2536 (mac_status
& MAC_STATUS_PCS_SYNCED
) &&
2537 !(mac_status
& MAC_STATUS_RCVD_CFG
))
2538 current_link_up
= 1;
2540 /* Forcing 1000FD link up. */
2541 current_link_up
= 1;
2542 tp
->tg3_flags
|= TG3_FLAG_GOT_SERDES_FLOWCTL
;
2544 tw32_f(MAC_MODE
, (tp
->mac_mode
| MAC_MODE_SEND_CONFIGS
));
2549 return current_link_up
;
2552 static int tg3_setup_fiber_phy(struct tg3
*tp
, int force_reset
)
2555 u16 orig_active_speed
;
2556 u8 orig_active_duplex
;
2558 int current_link_up
;
2562 (tp
->tg3_flags
& (TG3_FLAG_RX_PAUSE
|
2563 TG3_FLAG_TX_PAUSE
));
2564 orig_active_speed
= tp
->link_config
.active_speed
;
2565 orig_active_duplex
= tp
->link_config
.active_duplex
;
2567 if (!(tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
) &&
2568 netif_carrier_ok(tp
->dev
) &&
2569 (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
)) {
2570 mac_status
= tr32(MAC_STATUS
);
2571 mac_status
&= (MAC_STATUS_PCS_SYNCED
|
2572 MAC_STATUS_SIGNAL_DET
|
2573 MAC_STATUS_CFG_CHANGED
|
2574 MAC_STATUS_RCVD_CFG
);
2575 if (mac_status
== (MAC_STATUS_PCS_SYNCED
|
2576 MAC_STATUS_SIGNAL_DET
)) {
2577 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
2578 MAC_STATUS_CFG_CHANGED
));
2583 tw32_f(MAC_TX_AUTO_NEG
, 0);
2585 tp
->mac_mode
&= ~(MAC_MODE_PORT_MODE_MASK
| MAC_MODE_HALF_DUPLEX
);
2586 tp
->mac_mode
|= MAC_MODE_PORT_MODE_TBI
;
2587 tw32_f(MAC_MODE
, tp
->mac_mode
);
2590 if (tp
->phy_id
== PHY_ID_BCM8002
)
2591 tg3_init_bcm8002(tp
);
2593 /* Enable link change event even when serdes polling. */
2594 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2597 current_link_up
= 0;
2598 mac_status
= tr32(MAC_STATUS
);
2600 if (tp
->tg3_flags2
& TG3_FLG2_HW_AUTONEG
)
2601 current_link_up
= tg3_setup_fiber_hw_autoneg(tp
, mac_status
);
2603 current_link_up
= tg3_setup_fiber_by_hand(tp
, mac_status
);
2605 tp
->mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
2606 tw32_f(MAC_MODE
, tp
->mac_mode
);
2609 tp
->hw_status
->status
=
2610 (SD_STATUS_UPDATED
|
2611 (tp
->hw_status
->status
& ~SD_STATUS_LINK_CHG
));
2613 for (i
= 0; i
< 100; i
++) {
2614 tw32_f(MAC_STATUS
, (MAC_STATUS_SYNC_CHANGED
|
2615 MAC_STATUS_CFG_CHANGED
));
2617 if ((tr32(MAC_STATUS
) & (MAC_STATUS_SYNC_CHANGED
|
2618 MAC_STATUS_CFG_CHANGED
|
2619 MAC_STATUS_LNKSTATE_CHANGED
)) == 0)
2623 mac_status
= tr32(MAC_STATUS
);
2624 if ((mac_status
& MAC_STATUS_PCS_SYNCED
) == 0) {
2625 current_link_up
= 0;
2626 if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
&&
2627 tp
->serdes_counter
== 0) {
2628 tw32_f(MAC_MODE
, (tp
->mac_mode
|
2629 MAC_MODE_SEND_CONFIGS
));
2631 tw32_f(MAC_MODE
, tp
->mac_mode
);
2635 if (current_link_up
== 1) {
2636 tp
->link_config
.active_speed
= SPEED_1000
;
2637 tp
->link_config
.active_duplex
= DUPLEX_FULL
;
2638 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
2639 LED_CTRL_LNKLED_OVERRIDE
|
2640 LED_CTRL_1000MBPS_ON
));
2642 tp
->link_config
.active_speed
= SPEED_INVALID
;
2643 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
2644 tw32(MAC_LED_CTRL
, (tp
->led_ctrl
|
2645 LED_CTRL_LNKLED_OVERRIDE
|
2646 LED_CTRL_TRAFFIC_OVERRIDE
));
2649 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2650 if (current_link_up
)
2651 netif_carrier_on(tp
->dev
);
2653 netif_carrier_off(tp
->dev
);
2654 tg3_link_report(tp
);
2657 tp
->tg3_flags
& (TG3_FLAG_RX_PAUSE
|
2659 if (orig_pause_cfg
!= now_pause_cfg
||
2660 orig_active_speed
!= tp
->link_config
.active_speed
||
2661 orig_active_duplex
!= tp
->link_config
.active_duplex
)
2662 tg3_link_report(tp
);
2668 static int tg3_setup_fiber_mii_phy(struct tg3
*tp
, int force_reset
)
2670 int current_link_up
, err
= 0;
2675 tp
->mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
2676 tw32_f(MAC_MODE
, tp
->mac_mode
);
2682 (MAC_STATUS_SYNC_CHANGED
|
2683 MAC_STATUS_CFG_CHANGED
|
2684 MAC_STATUS_MI_COMPLETION
|
2685 MAC_STATUS_LNKSTATE_CHANGED
));
2691 current_link_up
= 0;
2692 current_speed
= SPEED_INVALID
;
2693 current_duplex
= DUPLEX_INVALID
;
2695 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2696 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2697 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
2698 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
2699 bmsr
|= BMSR_LSTATUS
;
2701 bmsr
&= ~BMSR_LSTATUS
;
2704 err
|= tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2706 if ((tp
->link_config
.autoneg
== AUTONEG_ENABLE
) && !force_reset
&&
2707 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
2708 /* do nothing, just check for link up at the end */
2709 } else if (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) {
2712 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
2713 new_adv
= adv
& ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
|
2714 ADVERTISE_1000XPAUSE
|
2715 ADVERTISE_1000XPSE_ASYM
|
2718 /* Always advertise symmetric PAUSE just like copper */
2719 new_adv
|= ADVERTISE_1000XPAUSE
;
2721 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Half
)
2722 new_adv
|= ADVERTISE_1000XHALF
;
2723 if (tp
->link_config
.advertising
& ADVERTISED_1000baseT_Full
)
2724 new_adv
|= ADVERTISE_1000XFULL
;
2726 if ((new_adv
!= adv
) || !(bmcr
& BMCR_ANENABLE
)) {
2727 tg3_writephy(tp
, MII_ADVERTISE
, new_adv
);
2728 bmcr
|= BMCR_ANENABLE
| BMCR_ANRESTART
;
2729 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2731 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2732 tp
->serdes_counter
= SERDES_AN_TIMEOUT_5714S
;
2733 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2740 bmcr
&= ~BMCR_SPEED1000
;
2741 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BMCR_FULLDPLX
);
2743 if (tp
->link_config
.duplex
== DUPLEX_FULL
)
2744 new_bmcr
|= BMCR_FULLDPLX
;
2746 if (new_bmcr
!= bmcr
) {
2747 /* BMCR_SPEED1000 is a reserved bit that needs
2748 * to be set on write.
2750 new_bmcr
|= BMCR_SPEED1000
;
2752 /* Force a linkdown */
2753 if (netif_carrier_ok(tp
->dev
)) {
2756 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &adv
);
2757 adv
&= ~(ADVERTISE_1000XFULL
|
2758 ADVERTISE_1000XHALF
|
2760 tg3_writephy(tp
, MII_ADVERTISE
, adv
);
2761 tg3_writephy(tp
, MII_BMCR
, bmcr
|
2765 netif_carrier_off(tp
->dev
);
2767 tg3_writephy(tp
, MII_BMCR
, new_bmcr
);
2769 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2770 err
|= tg3_readphy(tp
, MII_BMSR
, &bmsr
);
2771 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
2773 if (tr32(MAC_TX_STATUS
) & TX_STATUS_LINK_UP
)
2774 bmsr
|= BMSR_LSTATUS
;
2776 bmsr
&= ~BMSR_LSTATUS
;
2778 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2782 if (bmsr
& BMSR_LSTATUS
) {
2783 current_speed
= SPEED_1000
;
2784 current_link_up
= 1;
2785 if (bmcr
& BMCR_FULLDPLX
)
2786 current_duplex
= DUPLEX_FULL
;
2788 current_duplex
= DUPLEX_HALF
;
2790 if (bmcr
& BMCR_ANENABLE
) {
2791 u32 local_adv
, remote_adv
, common
;
2793 err
|= tg3_readphy(tp
, MII_ADVERTISE
, &local_adv
);
2794 err
|= tg3_readphy(tp
, MII_LPA
, &remote_adv
);
2795 common
= local_adv
& remote_adv
;
2796 if (common
& (ADVERTISE_1000XHALF
|
2797 ADVERTISE_1000XFULL
)) {
2798 if (common
& ADVERTISE_1000XFULL
)
2799 current_duplex
= DUPLEX_FULL
;
2801 current_duplex
= DUPLEX_HALF
;
2803 tg3_setup_flow_control(tp
, local_adv
,
2807 current_link_up
= 0;
2811 tp
->mac_mode
&= ~MAC_MODE_HALF_DUPLEX
;
2812 if (tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2813 tp
->mac_mode
|= MAC_MODE_HALF_DUPLEX
;
2815 tw32_f(MAC_MODE
, tp
->mac_mode
);
2818 tw32_f(MAC_EVENT
, MAC_EVENT_LNKSTATE_CHANGED
);
2820 tp
->link_config
.active_speed
= current_speed
;
2821 tp
->link_config
.active_duplex
= current_duplex
;
2823 if (current_link_up
!= netif_carrier_ok(tp
->dev
)) {
2824 if (current_link_up
)
2825 netif_carrier_on(tp
->dev
);
2827 netif_carrier_off(tp
->dev
);
2828 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2830 tg3_link_report(tp
);
2835 static void tg3_serdes_parallel_detect(struct tg3
*tp
)
2837 if (tp
->serdes_counter
) {
2838 /* Give autoneg time to complete. */
2839 tp
->serdes_counter
--;
2842 if (!netif_carrier_ok(tp
->dev
) &&
2843 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
)) {
2846 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2847 if (bmcr
& BMCR_ANENABLE
) {
2850 /* Select shadow register 0x1f */
2851 tg3_writephy(tp
, 0x1c, 0x7c00);
2852 tg3_readphy(tp
, 0x1c, &phy1
);
2854 /* Select expansion interrupt status register */
2855 tg3_writephy(tp
, 0x17, 0x0f01);
2856 tg3_readphy(tp
, 0x15, &phy2
);
2857 tg3_readphy(tp
, 0x15, &phy2
);
2859 if ((phy1
& 0x10) && !(phy2
& 0x20)) {
2860 /* We have signal detect and not receiving
2861 * config code words, link is up by parallel
2865 bmcr
&= ~BMCR_ANENABLE
;
2866 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
2867 tg3_writephy(tp
, MII_BMCR
, bmcr
);
2868 tp
->tg3_flags2
|= TG3_FLG2_PARALLEL_DETECT
;
2872 else if (netif_carrier_ok(tp
->dev
) &&
2873 (tp
->link_config
.autoneg
== AUTONEG_ENABLE
) &&
2874 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
)) {
2877 /* Select expansion interrupt status register */
2878 tg3_writephy(tp
, 0x17, 0x0f01);
2879 tg3_readphy(tp
, 0x15, &phy2
);
2883 /* Config code words received, turn on autoneg. */
2884 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
2885 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANENABLE
);
2887 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
2893 static int tg3_setup_phy(struct tg3
*tp
, int force_reset
)
2897 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
2898 err
= tg3_setup_fiber_phy(tp
, force_reset
);
2899 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
2900 err
= tg3_setup_fiber_mii_phy(tp
, force_reset
);
2902 err
= tg3_setup_copper_phy(tp
, force_reset
);
2905 if (tp
->link_config
.active_speed
== SPEED_1000
&&
2906 tp
->link_config
.active_duplex
== DUPLEX_HALF
)
2907 tw32(MAC_TX_LENGTHS
,
2908 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2909 (6 << TX_LENGTHS_IPG_SHIFT
) |
2910 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2912 tw32(MAC_TX_LENGTHS
,
2913 ((2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
2914 (6 << TX_LENGTHS_IPG_SHIFT
) |
2915 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
)));
2917 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
2918 if (netif_carrier_ok(tp
->dev
)) {
2919 tw32(HOSTCC_STAT_COAL_TICKS
,
2920 tp
->coal
.stats_block_coalesce_usecs
);
2922 tw32(HOSTCC_STAT_COAL_TICKS
, 0);
2929 /* This is called whenever we suspect that the system chipset is re-
2930 * ordering the sequence of MMIO to the tx send mailbox. The symptom
2931 * is bogus tx completions. We try to recover by setting the
2932 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
2935 static void tg3_tx_recover(struct tg3
*tp
)
2937 BUG_ON((tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
) ||
2938 tp
->write32_tx_mbox
== tg3_write_indirect_mbox
);
2940 printk(KERN_WARNING PFX
"%s: The system may be re-ordering memory-"
2941 "mapped I/O cycles to the network device, attempting to "
2942 "recover. Please report the problem to the driver maintainer "
2943 "and include system chipset information.\n", tp
->dev
->name
);
2945 spin_lock(&tp
->lock
);
2946 tp
->tg3_flags
|= TG3_FLAG_TX_RECOVERY_PENDING
;
2947 spin_unlock(&tp
->lock
);
2950 static inline u32
tg3_tx_avail(struct tg3
*tp
)
2953 return (tp
->tx_pending
-
2954 ((tp
->tx_prod
- tp
->tx_cons
) & (TG3_TX_RING_SIZE
- 1)));
2957 /* Tigon3 never reports partial packet sends. So we do not
2958 * need special logic to handle SKBs that have not had all
2959 * of their frags sent yet, like SunGEM does.
2961 static void tg3_tx(struct tg3
*tp
)
2963 u32 hw_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
2964 u32 sw_idx
= tp
->tx_cons
;
2966 while (sw_idx
!= hw_idx
) {
2967 struct tx_ring_info
*ri
= &tp
->tx_buffers
[sw_idx
];
2968 struct sk_buff
*skb
= ri
->skb
;
2971 if (unlikely(skb
== NULL
)) {
2976 pci_unmap_single(tp
->pdev
,
2977 pci_unmap_addr(ri
, mapping
),
2983 sw_idx
= NEXT_TX(sw_idx
);
2985 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2986 ri
= &tp
->tx_buffers
[sw_idx
];
2987 if (unlikely(ri
->skb
!= NULL
|| sw_idx
== hw_idx
))
2990 pci_unmap_page(tp
->pdev
,
2991 pci_unmap_addr(ri
, mapping
),
2992 skb_shinfo(skb
)->frags
[i
].size
,
2995 sw_idx
= NEXT_TX(sw_idx
);
3000 if (unlikely(tx_bug
)) {
3006 tp
->tx_cons
= sw_idx
;
3008 /* Need to make the tx_cons update visible to tg3_start_xmit()
3009 * before checking for netif_queue_stopped(). Without the
3010 * memory barrier, there is a small possibility that tg3_start_xmit()
3011 * will miss it and cause the queue to be stopped forever.
3015 if (unlikely(netif_queue_stopped(tp
->dev
) &&
3016 (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH
))) {
3017 netif_tx_lock(tp
->dev
);
3018 if (netif_queue_stopped(tp
->dev
) &&
3019 (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH
))
3020 netif_wake_queue(tp
->dev
);
3021 netif_tx_unlock(tp
->dev
);
3025 /* Returns size of skb allocated or < 0 on error.
3027 * We only need to fill in the address because the other members
3028 * of the RX descriptor are invariant, see tg3_init_rings.
3030 * Note the purposeful assymetry of cpu vs. chip accesses. For
3031 * posting buffers we only dirty the first cache line of the RX
3032 * descriptor (containing the address). Whereas for the RX status
3033 * buffers the cpu only reads the last cacheline of the RX descriptor
3034 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3036 static int tg3_alloc_rx_skb(struct tg3
*tp
, u32 opaque_key
,
3037 int src_idx
, u32 dest_idx_unmasked
)
3039 struct tg3_rx_buffer_desc
*desc
;
3040 struct ring_info
*map
, *src_map
;
3041 struct sk_buff
*skb
;
3043 int skb_size
, dest_idx
;
3046 switch (opaque_key
) {
3047 case RXD_OPAQUE_RING_STD
:
3048 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3049 desc
= &tp
->rx_std
[dest_idx
];
3050 map
= &tp
->rx_std_buffers
[dest_idx
];
3052 src_map
= &tp
->rx_std_buffers
[src_idx
];
3053 skb_size
= tp
->rx_pkt_buf_sz
;
3056 case RXD_OPAQUE_RING_JUMBO
:
3057 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3058 desc
= &tp
->rx_jumbo
[dest_idx
];
3059 map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3061 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3062 skb_size
= RX_JUMBO_PKT_BUF_SZ
;
3069 /* Do not overwrite any of the map or rp information
3070 * until we are sure we can commit to a new buffer.
3072 * Callers depend upon this behavior and assume that
3073 * we leave everything unchanged if we fail.
3075 skb
= netdev_alloc_skb(tp
->dev
, skb_size
);
3079 skb_reserve(skb
, tp
->rx_offset
);
3081 mapping
= pci_map_single(tp
->pdev
, skb
->data
,
3082 skb_size
- tp
->rx_offset
,
3083 PCI_DMA_FROMDEVICE
);
3086 pci_unmap_addr_set(map
, mapping
, mapping
);
3088 if (src_map
!= NULL
)
3089 src_map
->skb
= NULL
;
3091 desc
->addr_hi
= ((u64
)mapping
>> 32);
3092 desc
->addr_lo
= ((u64
)mapping
& 0xffffffff);
3097 /* We only need to move over in the address because the other
3098 * members of the RX descriptor are invariant. See notes above
3099 * tg3_alloc_rx_skb for full details.
3101 static void tg3_recycle_rx(struct tg3
*tp
, u32 opaque_key
,
3102 int src_idx
, u32 dest_idx_unmasked
)
3104 struct tg3_rx_buffer_desc
*src_desc
, *dest_desc
;
3105 struct ring_info
*src_map
, *dest_map
;
3108 switch (opaque_key
) {
3109 case RXD_OPAQUE_RING_STD
:
3110 dest_idx
= dest_idx_unmasked
% TG3_RX_RING_SIZE
;
3111 dest_desc
= &tp
->rx_std
[dest_idx
];
3112 dest_map
= &tp
->rx_std_buffers
[dest_idx
];
3113 src_desc
= &tp
->rx_std
[src_idx
];
3114 src_map
= &tp
->rx_std_buffers
[src_idx
];
3117 case RXD_OPAQUE_RING_JUMBO
:
3118 dest_idx
= dest_idx_unmasked
% TG3_RX_JUMBO_RING_SIZE
;
3119 dest_desc
= &tp
->rx_jumbo
[dest_idx
];
3120 dest_map
= &tp
->rx_jumbo_buffers
[dest_idx
];
3121 src_desc
= &tp
->rx_jumbo
[src_idx
];
3122 src_map
= &tp
->rx_jumbo_buffers
[src_idx
];
3129 dest_map
->skb
= src_map
->skb
;
3130 pci_unmap_addr_set(dest_map
, mapping
,
3131 pci_unmap_addr(src_map
, mapping
));
3132 dest_desc
->addr_hi
= src_desc
->addr_hi
;
3133 dest_desc
->addr_lo
= src_desc
->addr_lo
;
3135 src_map
->skb
= NULL
;
#if TG3_VLAN_TAG_USED
/* Hand a received frame carrying a hardware-extracted VLAN tag to the
 * VLAN-aware receive path.  The caller (tg3_rx) only invokes this when
 * tp->vlgrp is non-NULL.
 */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3145 /* The RX ring scheme is composed of multiple rings which post fresh
3146 * buffers to the chip, and one special ring the chip uses to report
3147 * status back to the host.
3149 * The special ring reports the status of received packets to the
3150 * host. The chip does not write into the original descriptor the
3151 * RX buffer was obtained from. The chip simply takes the original
3152 * descriptor as provided by the host, updates the status and length
3153 * field, then writes this into the next status ring entry.
3155 * Each ring the host uses to post buffers to the chip is described
3156 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
3157 * it is first placed into the on-chip ram. When the packet's length
3158 * is known, it walks down the TG3_BDINFO entries to select the ring.
3159 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
3160 * which is within the range of the new packet's length is chosen.
3162 * The "separate ring for rx status" scheme may sound queer, but it makes
3163 * sense from a cache coherency perspective. If only the host writes
3164 * to the buffer post rings, and only the chip writes to the rx status
3165 * rings, then cache lines never move beyond shared-modified state.
3166 * If both the host and chip were to write into the same ring, cache line
3167 * eviction could occur since both entities want it in an exclusive state.
3169 static int tg3_rx(struct tg3
*tp
, int budget
)
3171 u32 work_mask
, rx_std_posted
= 0;
3172 u32 sw_idx
= tp
->rx_rcb_ptr
;
3176 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3178 * We need to order the read of hw_idx and the read of
3179 * the opaque cookie.
3184 while (sw_idx
!= hw_idx
&& budget
> 0) {
3185 struct tg3_rx_buffer_desc
*desc
= &tp
->rx_rcb
[sw_idx
];
3187 struct sk_buff
*skb
;
3188 dma_addr_t dma_addr
;
3189 u32 opaque_key
, desc_idx
, *post_ptr
;
3191 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
3192 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
3193 if (opaque_key
== RXD_OPAQUE_RING_STD
) {
3194 dma_addr
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
],
3196 skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
3197 post_ptr
= &tp
->rx_std_ptr
;
3199 } else if (opaque_key
== RXD_OPAQUE_RING_JUMBO
) {
3200 dma_addr
= pci_unmap_addr(&tp
->rx_jumbo_buffers
[desc_idx
],
3202 skb
= tp
->rx_jumbo_buffers
[desc_idx
].skb
;
3203 post_ptr
= &tp
->rx_jumbo_ptr
;
3206 goto next_pkt_nopost
;
3209 work_mask
|= opaque_key
;
3211 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
3212 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
)) {
3214 tg3_recycle_rx(tp
, opaque_key
,
3215 desc_idx
, *post_ptr
);
3217 /* Other statistics kept track of by card. */
3218 tp
->net_stats
.rx_dropped
++;
3222 len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4; /* omit crc */
3224 if (len
> RX_COPY_THRESHOLD
3225 && tp
->rx_offset
== 2
3226 /* rx_offset != 2 iff this is a 5701 card running
3227 * in PCI-X mode [see tg3_get_invariants()] */
3231 skb_size
= tg3_alloc_rx_skb(tp
, opaque_key
,
3232 desc_idx
, *post_ptr
);
3236 pci_unmap_single(tp
->pdev
, dma_addr
,
3237 skb_size
- tp
->rx_offset
,
3238 PCI_DMA_FROMDEVICE
);
3242 struct sk_buff
*copy_skb
;
3244 tg3_recycle_rx(tp
, opaque_key
,
3245 desc_idx
, *post_ptr
);
3247 copy_skb
= netdev_alloc_skb(tp
->dev
, len
+ 2);
3248 if (copy_skb
== NULL
)
3249 goto drop_it_no_recycle
;
3251 skb_reserve(copy_skb
, 2);
3252 skb_put(copy_skb
, len
);
3253 pci_dma_sync_single_for_cpu(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3254 memcpy(copy_skb
->data
, skb
->data
, len
);
3255 pci_dma_sync_single_for_device(tp
->pdev
, dma_addr
, len
, PCI_DMA_FROMDEVICE
);
3257 /* We'll reuse the original ring buffer. */
3261 if ((tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) &&
3262 (desc
->type_flags
& RXD_FLAG_TCPUDP_CSUM
) &&
3263 (((desc
->ip_tcp_csum
& RXD_TCPCSUM_MASK
)
3264 >> RXD_TCPCSUM_SHIFT
) == 0xffff))
3265 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
3267 skb
->ip_summed
= CHECKSUM_NONE
;
3269 skb
->protocol
= eth_type_trans(skb
, tp
->dev
);
3270 #if TG3_VLAN_TAG_USED
3271 if (tp
->vlgrp
!= NULL
&&
3272 desc
->type_flags
& RXD_FLAG_VLAN
) {
3273 tg3_vlan_rx(tp
, skb
,
3274 desc
->err_vlan
& RXD_VLAN_MASK
);
3277 netif_receive_skb(skb
);
3279 tp
->dev
->last_rx
= jiffies
;
3286 if (unlikely(rx_std_posted
>= tp
->rx_std_max_post
)) {
3287 u32 idx
= *post_ptr
% TG3_RX_RING_SIZE
;
3289 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+
3290 TG3_64BIT_REG_LOW
, idx
);
3291 work_mask
&= ~RXD_OPAQUE_RING_STD
;
3296 sw_idx
%= TG3_RX_RCB_RING_SIZE(tp
);
3298 /* Refresh hw_idx to see if there is new work */
3299 if (sw_idx
== hw_idx
) {
3300 hw_idx
= tp
->hw_status
->idx
[0].rx_producer
;
3305 /* ACK the status ring. */
3306 tp
->rx_rcb_ptr
= sw_idx
;
3307 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, sw_idx
);
3309 /* Refill RX ring(s). */
3310 if (work_mask
& RXD_OPAQUE_RING_STD
) {
3311 sw_idx
= tp
->rx_std_ptr
% TG3_RX_RING_SIZE
;
3312 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3315 if (work_mask
& RXD_OPAQUE_RING_JUMBO
) {
3316 sw_idx
= tp
->rx_jumbo_ptr
% TG3_RX_JUMBO_RING_SIZE
;
3317 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
3325 static int tg3_poll(struct net_device
*netdev
, int *budget
)
3327 struct tg3
*tp
= netdev_priv(netdev
);
3328 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3331 /* handle link change and other phy events */
3332 if (!(tp
->tg3_flags
&
3333 (TG3_FLAG_USE_LINKCHG_REG
|
3334 TG3_FLAG_POLL_SERDES
))) {
3335 if (sblk
->status
& SD_STATUS_LINK_CHG
) {
3336 sblk
->status
= SD_STATUS_UPDATED
|
3337 (sblk
->status
& ~SD_STATUS_LINK_CHG
);
3338 spin_lock(&tp
->lock
);
3339 tg3_setup_phy(tp
, 0);
3340 spin_unlock(&tp
->lock
);
3344 /* run TX completion thread */
3345 if (sblk
->idx
[0].tx_consumer
!= tp
->tx_cons
) {
3347 if (unlikely(tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
)) {
3348 netif_rx_complete(netdev
);
3349 schedule_work(&tp
->reset_task
);
3354 /* run RX thread, within the bounds set by NAPI.
3355 * All RX "locking" is done by ensuring outside
3356 * code synchronizes with dev->poll()
3358 if (sblk
->idx
[0].rx_producer
!= tp
->rx_rcb_ptr
) {
3359 int orig_budget
= *budget
;
3362 if (orig_budget
> netdev
->quota
)
3363 orig_budget
= netdev
->quota
;
3365 work_done
= tg3_rx(tp
, orig_budget
);
3367 *budget
-= work_done
;
3368 netdev
->quota
-= work_done
;
3371 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
3372 tp
->last_tag
= sblk
->status_tag
;
3375 sblk
->status
&= ~SD_STATUS_UPDATED
;
3377 /* if no more work, tell net stack and NIC we're done */
3378 done
= !tg3_has_work(tp
);
3380 netif_rx_complete(netdev
);
3381 tg3_restart_ints(tp
);
3384 return (done
? 0 : 1);
3387 static void tg3_irq_quiesce(struct tg3
*tp
)
3389 BUG_ON(tp
->irq_sync
);
3394 synchronize_irq(tp
->pdev
->irq
);
3397 static inline int tg3_irq_sync(struct tg3
*tp
)
3399 return tp
->irq_sync
;
3402 /* Fully shutdown all tg3 driver activity elsewhere in the system.
3403 * If irq_sync is non-zero, then the IRQ handler must be synchronized
3404 * with as well. Most of the time, this is not necessary except when
3405 * shutting down the device.
3407 static inline void tg3_full_lock(struct tg3
*tp
, int irq_sync
)
3410 tg3_irq_quiesce(tp
);
3411 spin_lock_bh(&tp
->lock
);
3414 static inline void tg3_full_unlock(struct tg3
*tp
)
3416 spin_unlock_bh(&tp
->lock
);
3419 /* One-shot MSI handler - Chip automatically disables interrupt
3420 * after sending MSI so driver doesn't have to do it.
3422 static irqreturn_t
tg3_msi_1shot(int irq
, void *dev_id
, struct pt_regs
*regs
)
3424 struct net_device
*dev
= dev_id
;
3425 struct tg3
*tp
= netdev_priv(dev
);
3427 prefetch(tp
->hw_status
);
3428 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3430 if (likely(!tg3_irq_sync(tp
)))
3431 netif_rx_schedule(dev
); /* schedule NAPI poll */
3436 /* MSI ISR - No need to check for interrupt sharing and no need to
3437 * flush status block and interrupt mailbox. PCI ordering rules
3438 * guarantee that MSI will arrive after the status block.
3440 static irqreturn_t
tg3_msi(int irq
, void *dev_id
, struct pt_regs
*regs
)
3442 struct net_device
*dev
= dev_id
;
3443 struct tg3
*tp
= netdev_priv(dev
);
3445 prefetch(tp
->hw_status
);
3446 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3448 * Writing any value to intr-mbox-0 clears PCI INTA# and
3449 * chip-internal interrupt pending events.
3450 * Writing non-zero to intr-mbox-0 additional tells the
3451 * NIC to stop sending us irqs, engaging "in-intr-handler"
3454 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0x00000001);
3455 if (likely(!tg3_irq_sync(tp
)))
3456 netif_rx_schedule(dev
); /* schedule NAPI poll */
3458 return IRQ_RETVAL(1);
3461 static irqreturn_t
tg3_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
3463 struct net_device
*dev
= dev_id
;
3464 struct tg3
*tp
= netdev_priv(dev
);
3465 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3466 unsigned int handled
= 1;
3468 /* In INTx mode, it is possible for the interrupt to arrive at
3469 * the CPU before the status block posted prior to the interrupt.
3470 * Reading the PCI State register will confirm whether the
3471 * interrupt is ours and will flush the status block.
3473 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
3474 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3476 * Writing any value to intr-mbox-0 clears PCI INTA# and
3477 * chip-internal interrupt pending events.
3478 * Writing non-zero to intr-mbox-0 additional tells the
3479 * NIC to stop sending us irqs, engaging "in-intr-handler"
3482 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3484 if (tg3_irq_sync(tp
))
3486 sblk
->status
&= ~SD_STATUS_UPDATED
;
3487 if (likely(tg3_has_work(tp
))) {
3488 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3489 netif_rx_schedule(dev
); /* schedule NAPI poll */
3491 /* No work, shared interrupt perhaps? re-enable
3492 * interrupts, and flush that PCI write
3494 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3497 } else { /* shared interrupt */
3501 return IRQ_RETVAL(handled
);
3504 static irqreturn_t
tg3_interrupt_tagged(int irq
, void *dev_id
, struct pt_regs
*regs
)
3506 struct net_device
*dev
= dev_id
;
3507 struct tg3
*tp
= netdev_priv(dev
);
3508 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3509 unsigned int handled
= 1;
3511 /* In INTx mode, it is possible for the interrupt to arrive at
3512 * the CPU before the status block posted prior to the interrupt.
3513 * Reading the PCI State register will confirm whether the
3514 * interrupt is ours and will flush the status block.
3516 if ((sblk
->status_tag
!= tp
->last_tag
) ||
3517 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3519 * writing any value to intr-mbox-0 clears PCI INTA# and
3520 * chip-internal interrupt pending events.
3521 * writing non-zero to intr-mbox-0 additional tells the
3522 * NIC to stop sending us irqs, engaging "in-intr-handler"
3525 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3527 if (tg3_irq_sync(tp
))
3529 if (netif_rx_schedule_prep(dev
)) {
3530 prefetch(&tp
->rx_rcb
[tp
->rx_rcb_ptr
]);
3531 /* Update last_tag to mark that this status has been
3532 * seen. Because interrupt may be shared, we may be
3533 * racing with tg3_poll(), so only update last_tag
3534 * if tg3_poll() is not scheduled.
3536 tp
->last_tag
= sblk
->status_tag
;
3537 __netif_rx_schedule(dev
);
3539 } else { /* shared interrupt */
3543 return IRQ_RETVAL(handled
);
3546 /* ISR for interrupt test */
3547 static irqreturn_t
tg3_test_isr(int irq
, void *dev_id
,
3548 struct pt_regs
*regs
)
3550 struct net_device
*dev
= dev_id
;
3551 struct tg3
*tp
= netdev_priv(dev
);
3552 struct tg3_hw_status
*sblk
= tp
->hw_status
;
3554 if ((sblk
->status
& SD_STATUS_UPDATED
) ||
3555 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_INT_NOT_ACTIVE
)) {
3556 tw32_mailbox(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
,
3558 return IRQ_RETVAL(1);
3560 return IRQ_RETVAL(0);
3563 static int tg3_init_hw(struct tg3
*, int);
3564 static int tg3_halt(struct tg3
*, int, int);
3566 /* Restart hardware after configuration changes, self-test, etc.
3567 * Invoked with tp->lock held.
3569 static int tg3_restart_hw(struct tg3
*tp
, int reset_phy
)
3573 err
= tg3_init_hw(tp
, reset_phy
);
3575 printk(KERN_ERR PFX
"%s: Failed to re-initialize device, "
3576 "aborting.\n", tp
->dev
->name
);
3577 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
3578 tg3_full_unlock(tp
);
3579 del_timer_sync(&tp
->timer
);
3581 netif_poll_enable(tp
->dev
);
3583 tg3_full_lock(tp
, 0);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: drive the normal INTx interrupt handler
 * directly so the device can be serviced with interrupts disabled
 * (netconsole, kgdb-over-ethernet, etc).
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev, NULL);
}
#endif
3597 static void tg3_reset_task(void *_data
)
3599 struct tg3
*tp
= _data
;
3600 unsigned int restart_timer
;
3602 tg3_full_lock(tp
, 0);
3603 tp
->tg3_flags
|= TG3_FLAG_IN_RESET_TASK
;
3605 if (!netif_running(tp
->dev
)) {
3606 tp
->tg3_flags
&= ~TG3_FLAG_IN_RESET_TASK
;
3607 tg3_full_unlock(tp
);
3611 tg3_full_unlock(tp
);
3615 tg3_full_lock(tp
, 1);
3617 restart_timer
= tp
->tg3_flags2
& TG3_FLG2_RESTART_TIMER
;
3618 tp
->tg3_flags2
&= ~TG3_FLG2_RESTART_TIMER
;
3620 if (tp
->tg3_flags
& TG3_FLAG_TX_RECOVERY_PENDING
) {
3621 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
3622 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
3623 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
3624 tp
->tg3_flags
&= ~TG3_FLAG_TX_RECOVERY_PENDING
;
3627 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 0);
3628 if (tg3_init_hw(tp
, 1))
3631 tg3_netif_start(tp
);
3634 mod_timer(&tp
->timer
, jiffies
+ 1);
3637 tp
->tg3_flags
&= ~TG3_FLAG_IN_RESET_TASK
;
3639 tg3_full_unlock(tp
);
3642 static void tg3_tx_timeout(struct net_device
*dev
)
3644 struct tg3
*tp
= netdev_priv(dev
);
3646 printk(KERN_ERR PFX
"%s: transmit timed out, resetting\n",
3649 schedule_work(&tp
->reset_task
);
3652 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
3653 static inline int tg3_4g_overflow_test(dma_addr_t mapping
, int len
)
3655 u32 base
= (u32
) mapping
& 0xffffffff;
3657 return ((base
> 0xffffdcc0) &&
3658 (base
+ len
+ 8 < base
));
3661 /* Test for DMA addresses > 40-bit */
3662 static inline int tg3_40bit_overflow_test(struct tg3
*tp
, dma_addr_t mapping
,
3665 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3666 if (tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
)
3667 return (((u64
) mapping
+ len
) > DMA_40BIT_MASK
);
3674 static void tg3_set_txd(struct tg3
*, int, dma_addr_t
, int, u32
, u32
);
3676 /* Workaround 4GB and 40-bit hardware DMA bugs. */
3677 static int tigon3_dma_hwbug_workaround(struct tg3
*tp
, struct sk_buff
*skb
,
3678 u32 last_plus_one
, u32
*start
,
3679 u32 base_flags
, u32 mss
)
3681 struct sk_buff
*new_skb
= skb_copy(skb
, GFP_ATOMIC
);
3682 dma_addr_t new_addr
= 0;
3689 /* New SKB is guaranteed to be linear. */
3691 new_addr
= pci_map_single(tp
->pdev
, new_skb
->data
, new_skb
->len
,
3693 /* Make sure new skb does not cross any 4G boundaries.
3694 * Drop the packet if it does.
3696 if (tg3_4g_overflow_test(new_addr
, new_skb
->len
)) {
3698 dev_kfree_skb(new_skb
);
3701 tg3_set_txd(tp
, entry
, new_addr
, new_skb
->len
,
3702 base_flags
, 1 | (mss
<< 1));
3703 *start
= NEXT_TX(entry
);
3707 /* Now clean up the sw ring entries. */
3709 while (entry
!= last_plus_one
) {
3713 len
= skb_headlen(skb
);
3715 len
= skb_shinfo(skb
)->frags
[i
-1].size
;
3716 pci_unmap_single(tp
->pdev
,
3717 pci_unmap_addr(&tp
->tx_buffers
[entry
], mapping
),
3718 len
, PCI_DMA_TODEVICE
);
3720 tp
->tx_buffers
[entry
].skb
= new_skb
;
3721 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, new_addr
);
3723 tp
->tx_buffers
[entry
].skb
= NULL
;
3725 entry
= NEXT_TX(entry
);
3734 static void tg3_set_txd(struct tg3
*tp
, int entry
,
3735 dma_addr_t mapping
, int len
, u32 flags
,
3738 struct tg3_tx_buffer_desc
*txd
= &tp
->tx_ring
[entry
];
3739 int is_end
= (mss_and_is_end
& 0x1);
3740 u32 mss
= (mss_and_is_end
>> 1);
3744 flags
|= TXD_FLAG_END
;
3745 if (flags
& TXD_FLAG_VLAN
) {
3746 vlan_tag
= flags
>> 16;
3749 vlan_tag
|= (mss
<< TXD_MSS_SHIFT
);
3751 txd
->addr_hi
= ((u64
) mapping
>> 32);
3752 txd
->addr_lo
= ((u64
) mapping
& 0xffffffff);
3753 txd
->len_flags
= (len
<< TXD_LEN_SHIFT
) | flags
;
3754 txd
->vlan_tag
= vlan_tag
<< TXD_VLAN_TAG_SHIFT
;
3757 /* hard_start_xmit for devices that don't have any bugs and
3758 * support TG3_FLG2_HW_TSO_2 only.
3760 static int tg3_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3762 struct tg3
*tp
= netdev_priv(dev
);
3764 u32 len
, entry
, base_flags
, mss
;
3766 len
= skb_headlen(skb
);
3768 /* We are running in BH disabled context with netif_tx_lock
3769 * and TX reclaim runs via tp->poll inside of a software
3770 * interrupt. Furthermore, IRQ processing runs lockless so we have
3771 * no IRQ context deadlocks to worry about either. Rejoice!
3773 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
3774 if (!netif_queue_stopped(dev
)) {
3775 netif_stop_queue(dev
);
3777 /* This is a hard error, log it. */
3778 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
3779 "queue awake!\n", dev
->name
);
3781 return NETDEV_TX_BUSY
;
3784 entry
= tp
->tx_prod
;
3786 #if TG3_TSO_SUPPORT != 0
3788 if (skb
->len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
3789 (mss
= skb_shinfo(skb
)->gso_size
) != 0) {
3790 int tcp_opt_len
, ip_tcp_len
;
3792 if (skb_header_cloned(skb
) &&
3793 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
3798 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
)
3799 mss
|= (skb_headlen(skb
) - ETH_HLEN
) << 9;
3801 tcp_opt_len
= ((skb
->h
.th
->doff
- 5) * 4);
3802 ip_tcp_len
= (skb
->nh
.iph
->ihl
* 4) +
3803 sizeof(struct tcphdr
);
3805 skb
->nh
.iph
->check
= 0;
3806 skb
->nh
.iph
->tot_len
= htons(mss
+ ip_tcp_len
+
3808 mss
|= (ip_tcp_len
+ tcp_opt_len
) << 9;
3811 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
3812 TXD_FLAG_CPU_POST_DMA
);
3814 skb
->h
.th
->check
= 0;
3817 else if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
3818 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
3821 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
3822 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
3824 #if TG3_VLAN_TAG_USED
3825 if (tp
->vlgrp
!= NULL
&& vlan_tx_tag_present(skb
))
3826 base_flags
|= (TXD_FLAG_VLAN
|
3827 (vlan_tx_tag_get(skb
) << 16));
3830 /* Queue skb data, a.k.a. the main skb fragment. */
3831 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
3833 tp
->tx_buffers
[entry
].skb
= skb
;
3834 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
3836 tg3_set_txd(tp
, entry
, mapping
, len
, base_flags
,
3837 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
3839 entry
= NEXT_TX(entry
);
3841 /* Now loop through additional data fragments, and queue them. */
3842 if (skb_shinfo(skb
)->nr_frags
> 0) {
3843 unsigned int i
, last
;
3845 last
= skb_shinfo(skb
)->nr_frags
- 1;
3846 for (i
= 0; i
<= last
; i
++) {
3847 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
3850 mapping
= pci_map_page(tp
->pdev
,
3853 len
, PCI_DMA_TODEVICE
);
3855 tp
->tx_buffers
[entry
].skb
= NULL
;
3856 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
3858 tg3_set_txd(tp
, entry
, mapping
, len
,
3859 base_flags
, (i
== last
) | (mss
<< 1));
3861 entry
= NEXT_TX(entry
);
3865 /* Packets are ready, update Tx producer idx local and on card. */
3866 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
), entry
);
3868 tp
->tx_prod
= entry
;
3869 if (unlikely(tg3_tx_avail(tp
) <= (MAX_SKB_FRAGS
+ 1))) {
3870 netif_stop_queue(dev
);
3871 if (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH
)
3872 netif_wake_queue(tp
->dev
);
3878 dev
->trans_start
= jiffies
;
3880 return NETDEV_TX_OK
;
3883 #if TG3_TSO_SUPPORT != 0
3884 static int tg3_start_xmit_dma_bug(struct sk_buff
*, struct net_device
*);
3886 /* Use GSO to workaround a rare TSO bug that may be triggered when the
3887 * TSO header is greater than 80 bytes.
3889 static int tg3_tso_bug(struct tg3
*tp
, struct sk_buff
*skb
)
3891 struct sk_buff
*segs
, *nskb
;
3893 /* Estimate the number of fragments in the worst case */
3894 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->gso_segs
* 3))) {
3895 netif_stop_queue(tp
->dev
);
3896 return NETDEV_TX_BUSY
;
3899 segs
= skb_gso_segment(skb
, tp
->dev
->features
& ~NETIF_F_TSO
);
3900 if (unlikely(IS_ERR(segs
)))
3901 goto tg3_tso_bug_end
;
3907 tg3_start_xmit_dma_bug(nskb
, tp
->dev
);
3913 return NETDEV_TX_OK
;
3917 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
3918 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
3920 static int tg3_start_xmit_dma_bug(struct sk_buff
*skb
, struct net_device
*dev
)
3922 struct tg3
*tp
= netdev_priv(dev
);
3924 u32 len
, entry
, base_flags
, mss
;
3925 int would_hit_hwbug
;
3927 len
= skb_headlen(skb
);
3929 /* We are running in BH disabled context with netif_tx_lock
3930 * and TX reclaim runs via tp->poll inside of a software
3931 * interrupt. Furthermore, IRQ processing runs lockless so we have
3932 * no IRQ context deadlocks to worry about either. Rejoice!
3934 if (unlikely(tg3_tx_avail(tp
) <= (skb_shinfo(skb
)->nr_frags
+ 1))) {
3935 if (!netif_queue_stopped(dev
)) {
3936 netif_stop_queue(dev
);
3938 /* This is a hard error, log it. */
3939 printk(KERN_ERR PFX
"%s: BUG! Tx Ring full when "
3940 "queue awake!\n", dev
->name
);
3942 return NETDEV_TX_BUSY
;
3945 entry
= tp
->tx_prod
;
3947 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
3948 base_flags
|= TXD_FLAG_TCPUDP_CSUM
;
3949 #if TG3_TSO_SUPPORT != 0
3951 if (skb
->len
> (tp
->dev
->mtu
+ ETH_HLEN
) &&
3952 (mss
= skb_shinfo(skb
)->gso_size
) != 0) {
3953 int tcp_opt_len
, ip_tcp_len
, hdr_len
;
3955 if (skb_header_cloned(skb
) &&
3956 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
3961 tcp_opt_len
= ((skb
->h
.th
->doff
- 5) * 4);
3962 ip_tcp_len
= (skb
->nh
.iph
->ihl
* 4) + sizeof(struct tcphdr
);
3964 hdr_len
= ip_tcp_len
+ tcp_opt_len
;
3965 if (unlikely((ETH_HLEN
+ hdr_len
) > 80) &&
3966 (tp
->tg3_flags2
& TG3_FLG2_HW_TSO_1_BUG
))
3967 return (tg3_tso_bug(tp
, skb
));
3969 base_flags
|= (TXD_FLAG_CPU_PRE_DMA
|
3970 TXD_FLAG_CPU_POST_DMA
);
3972 skb
->nh
.iph
->check
= 0;
3973 skb
->nh
.iph
->tot_len
= htons(mss
+ hdr_len
);
3974 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
3975 skb
->h
.th
->check
= 0;
3976 base_flags
&= ~TXD_FLAG_TCPUDP_CSUM
;
3980 ~csum_tcpudp_magic(skb
->nh
.iph
->saddr
,
3985 if ((tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) ||
3986 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
)) {
3987 if (tcp_opt_len
|| skb
->nh
.iph
->ihl
> 5) {
3990 tsflags
= ((skb
->nh
.iph
->ihl
- 5) +
3991 (tcp_opt_len
>> 2));
3992 mss
|= (tsflags
<< 11);
3995 if (tcp_opt_len
|| skb
->nh
.iph
->ihl
> 5) {
3998 tsflags
= ((skb
->nh
.iph
->ihl
- 5) +
3999 (tcp_opt_len
>> 2));
4000 base_flags
|= tsflags
<< 12;
4007 #if TG3_VLAN_TAG_USED
4008 if (tp
->vlgrp
!= NULL
&& vlan_tx_tag_present(skb
))
4009 base_flags
|= (TXD_FLAG_VLAN
|
4010 (vlan_tx_tag_get(skb
) << 16));
4013 /* Queue skb data, a.k.a. the main skb fragment. */
4014 mapping
= pci_map_single(tp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
4016 tp
->tx_buffers
[entry
].skb
= skb
;
4017 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
4019 would_hit_hwbug
= 0;
4021 if (tg3_4g_overflow_test(mapping
, len
))
4022 would_hit_hwbug
= 1;
4024 tg3_set_txd(tp
, entry
, mapping
, len
, base_flags
,
4025 (skb_shinfo(skb
)->nr_frags
== 0) | (mss
<< 1));
4027 entry
= NEXT_TX(entry
);
4029 /* Now loop through additional data fragments, and queue them. */
4030 if (skb_shinfo(skb
)->nr_frags
> 0) {
4031 unsigned int i
, last
;
4033 last
= skb_shinfo(skb
)->nr_frags
- 1;
4034 for (i
= 0; i
<= last
; i
++) {
4035 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4038 mapping
= pci_map_page(tp
->pdev
,
4041 len
, PCI_DMA_TODEVICE
);
4043 tp
->tx_buffers
[entry
].skb
= NULL
;
4044 pci_unmap_addr_set(&tp
->tx_buffers
[entry
], mapping
, mapping
);
4046 if (tg3_4g_overflow_test(mapping
, len
))
4047 would_hit_hwbug
= 1;
4049 if (tg3_40bit_overflow_test(tp
, mapping
, len
))
4050 would_hit_hwbug
= 1;
4052 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
4053 tg3_set_txd(tp
, entry
, mapping
, len
,
4054 base_flags
, (i
== last
)|(mss
<< 1));
4056 tg3_set_txd(tp
, entry
, mapping
, len
,
4057 base_flags
, (i
== last
));
4059 entry
= NEXT_TX(entry
);
4063 if (would_hit_hwbug
) {
4064 u32 last_plus_one
= entry
;
4067 start
= entry
- 1 - skb_shinfo(skb
)->nr_frags
;
4068 start
&= (TG3_TX_RING_SIZE
- 1);
4070 /* If the workaround fails due to memory/mapping
4071 * failure, silently drop this packet.
4073 if (tigon3_dma_hwbug_workaround(tp
, skb
, last_plus_one
,
4074 &start
, base_flags
, mss
))
4080 /* Packets are ready, update Tx producer idx local and on card. */
4081 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
), entry
);
4083 tp
->tx_prod
= entry
;
4084 if (unlikely(tg3_tx_avail(tp
) <= (MAX_SKB_FRAGS
+ 1))) {
4085 netif_stop_queue(dev
);
4086 if (tg3_tx_avail(tp
) > TG3_TX_WAKEUP_THRESH
)
4087 netif_wake_queue(tp
->dev
);
4093 dev
->trans_start
= jiffies
;
4095 return NETDEV_TX_OK
;
4098 static inline void tg3_set_mtu(struct net_device
*dev
, struct tg3
*tp
,
4103 if (new_mtu
> ETH_DATA_LEN
) {
4104 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
4105 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
4106 ethtool_op_set_tso(dev
, 0);
4109 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
4111 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
4112 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
4113 tp
->tg3_flags
&= ~TG3_FLAG_JUMBO_RING_ENABLE
;
4117 static int tg3_change_mtu(struct net_device
*dev
, int new_mtu
)
4119 struct tg3
*tp
= netdev_priv(dev
);
4122 if (new_mtu
< TG3_MIN_MTU
|| new_mtu
> TG3_MAX_MTU(tp
))
4125 if (!netif_running(dev
)) {
4126 /* We'll just catch it later when the
4129 tg3_set_mtu(dev
, tp
, new_mtu
);
4135 tg3_full_lock(tp
, 1);
4137 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
4139 tg3_set_mtu(dev
, tp
, new_mtu
);
4141 err
= tg3_restart_hw(tp
, 0);
4144 tg3_netif_start(tp
);
4146 tg3_full_unlock(tp
);
4151 /* Free up pending packets in all rx/tx rings.
4153 * The chip has been shut down and the driver detached from
4154 * the networking, so no interrupts or new tx packets will
4155 * end up in the driver. tp->{tx,}lock is not held and we are not
4156 * in an interrupt context and thus may sleep.
4158 static void tg3_free_rings(struct tg3
*tp
)
4160 struct ring_info
*rxp
;
4163 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
4164 rxp
= &tp
->rx_std_buffers
[i
];
4166 if (rxp
->skb
== NULL
)
4168 pci_unmap_single(tp
->pdev
,
4169 pci_unmap_addr(rxp
, mapping
),
4170 tp
->rx_pkt_buf_sz
- tp
->rx_offset
,
4171 PCI_DMA_FROMDEVICE
);
4172 dev_kfree_skb_any(rxp
->skb
);
4176 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
4177 rxp
= &tp
->rx_jumbo_buffers
[i
];
4179 if (rxp
->skb
== NULL
)
4181 pci_unmap_single(tp
->pdev
,
4182 pci_unmap_addr(rxp
, mapping
),
4183 RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
,
4184 PCI_DMA_FROMDEVICE
);
4185 dev_kfree_skb_any(rxp
->skb
);
4189 for (i
= 0; i
< TG3_TX_RING_SIZE
; ) {
4190 struct tx_ring_info
*txp
;
4191 struct sk_buff
*skb
;
4194 txp
= &tp
->tx_buffers
[i
];
4202 pci_unmap_single(tp
->pdev
,
4203 pci_unmap_addr(txp
, mapping
),
4210 for (j
= 0; j
< skb_shinfo(skb
)->nr_frags
; j
++) {
4211 txp
= &tp
->tx_buffers
[i
& (TG3_TX_RING_SIZE
- 1)];
4212 pci_unmap_page(tp
->pdev
,
4213 pci_unmap_addr(txp
, mapping
),
4214 skb_shinfo(skb
)->frags
[j
].size
,
4219 dev_kfree_skb_any(skb
);
4223 /* Initialize tx/rx rings for packet processing.
4225 * The chip has been shut down and the driver detached from
4226 * the networking, so no interrupts or new tx packets will
4227 * end up in the driver. tp->{tx,}lock are held and thus
4230 static int tg3_init_rings(struct tg3
*tp
)
4234 /* Free up all the SKBs. */
4237 /* Zero out all descriptors. */
4238 memset(tp
->rx_std
, 0, TG3_RX_RING_BYTES
);
4239 memset(tp
->rx_jumbo
, 0, TG3_RX_JUMBO_RING_BYTES
);
4240 memset(tp
->rx_rcb
, 0, TG3_RX_RCB_RING_BYTES(tp
));
4241 memset(tp
->tx_ring
, 0, TG3_TX_RING_BYTES
);
4243 tp
->rx_pkt_buf_sz
= RX_PKT_BUF_SZ
;
4244 if ((tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) &&
4245 (tp
->dev
->mtu
> ETH_DATA_LEN
))
4246 tp
->rx_pkt_buf_sz
= RX_JUMBO_PKT_BUF_SZ
;
4248 /* Initialize invariants of the rings, we only set this
4249 * stuff once. This works because the card does not
4250 * write into the rx buffer posting rings.
4252 for (i
= 0; i
< TG3_RX_RING_SIZE
; i
++) {
4253 struct tg3_rx_buffer_desc
*rxd
;
4255 rxd
= &tp
->rx_std
[i
];
4256 rxd
->idx_len
= (tp
->rx_pkt_buf_sz
- tp
->rx_offset
- 64)
4258 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
);
4259 rxd
->opaque
= (RXD_OPAQUE_RING_STD
|
4260 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
4263 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
4264 for (i
= 0; i
< TG3_RX_JUMBO_RING_SIZE
; i
++) {
4265 struct tg3_rx_buffer_desc
*rxd
;
4267 rxd
= &tp
->rx_jumbo
[i
];
4268 rxd
->idx_len
= (RX_JUMBO_PKT_BUF_SZ
- tp
->rx_offset
- 64)
4270 rxd
->type_flags
= (RXD_FLAG_END
<< RXD_FLAGS_SHIFT
) |
4272 rxd
->opaque
= (RXD_OPAQUE_RING_JUMBO
|
4273 (i
<< RXD_OPAQUE_INDEX_SHIFT
));
4277 /* Now allocate fresh SKBs for each rx ring. */
4278 for (i
= 0; i
< tp
->rx_pending
; i
++) {
4279 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_STD
, -1, i
) < 0) {
4280 printk(KERN_WARNING PFX
4281 "%s: Using a smaller RX standard ring, "
4282 "only %d out of %d buffers were allocated "
4284 tp
->dev
->name
, i
, tp
->rx_pending
);
4292 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
4293 for (i
= 0; i
< tp
->rx_jumbo_pending
; i
++) {
4294 if (tg3_alloc_rx_skb(tp
, RXD_OPAQUE_RING_JUMBO
,
4296 printk(KERN_WARNING PFX
4297 "%s: Using a smaller RX jumbo ring, "
4298 "only %d out of %d buffers were "
4299 "allocated successfully.\n",
4300 tp
->dev
->name
, i
, tp
->rx_jumbo_pending
);
4305 tp
->rx_jumbo_pending
= i
;
4314 * Must not be invoked with interrupt sources disabled and
4315 * the hardware shutdown down.
4317 static void tg3_free_consistent(struct tg3
*tp
)
4319 kfree(tp
->rx_std_buffers
);
4320 tp
->rx_std_buffers
= NULL
;
4322 pci_free_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4323 tp
->rx_std
, tp
->rx_std_mapping
);
4327 pci_free_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4328 tp
->rx_jumbo
, tp
->rx_jumbo_mapping
);
4329 tp
->rx_jumbo
= NULL
;
4332 pci_free_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4333 tp
->rx_rcb
, tp
->rx_rcb_mapping
);
4337 pci_free_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4338 tp
->tx_ring
, tp
->tx_desc_mapping
);
4341 if (tp
->hw_status
) {
4342 pci_free_consistent(tp
->pdev
, TG3_HW_STATUS_SIZE
,
4343 tp
->hw_status
, tp
->status_mapping
);
4344 tp
->hw_status
= NULL
;
4347 pci_free_consistent(tp
->pdev
, sizeof(struct tg3_hw_stats
),
4348 tp
->hw_stats
, tp
->stats_mapping
);
4349 tp
->hw_stats
= NULL
;
4354 * Must not be invoked with interrupt sources disabled and
4355 * the hardware shutdown down. Can sleep.
4357 static int tg3_alloc_consistent(struct tg3
*tp
)
4359 tp
->rx_std_buffers
= kmalloc((sizeof(struct ring_info
) *
4361 TG3_RX_JUMBO_RING_SIZE
)) +
4362 (sizeof(struct tx_ring_info
) *
4365 if (!tp
->rx_std_buffers
)
4368 memset(tp
->rx_std_buffers
, 0,
4369 (sizeof(struct ring_info
) *
4371 TG3_RX_JUMBO_RING_SIZE
)) +
4372 (sizeof(struct tx_ring_info
) *
4375 tp
->rx_jumbo_buffers
= &tp
->rx_std_buffers
[TG3_RX_RING_SIZE
];
4376 tp
->tx_buffers
= (struct tx_ring_info
*)
4377 &tp
->rx_jumbo_buffers
[TG3_RX_JUMBO_RING_SIZE
];
4379 tp
->rx_std
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RING_BYTES
,
4380 &tp
->rx_std_mapping
);
4384 tp
->rx_jumbo
= pci_alloc_consistent(tp
->pdev
, TG3_RX_JUMBO_RING_BYTES
,
4385 &tp
->rx_jumbo_mapping
);
4390 tp
->rx_rcb
= pci_alloc_consistent(tp
->pdev
, TG3_RX_RCB_RING_BYTES(tp
),
4391 &tp
->rx_rcb_mapping
);
4395 tp
->tx_ring
= pci_alloc_consistent(tp
->pdev
, TG3_TX_RING_BYTES
,
4396 &tp
->tx_desc_mapping
);
4400 tp
->hw_status
= pci_alloc_consistent(tp
->pdev
,
4402 &tp
->status_mapping
);
4406 tp
->hw_stats
= pci_alloc_consistent(tp
->pdev
,
4407 sizeof(struct tg3_hw_stats
),
4408 &tp
->stats_mapping
);
4412 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4413 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4418 tg3_free_consistent(tp
);
4422 #define MAX_WAIT_CNT 1000
4424 /* To stop a block, clear the enable bit and poll till it
4425 * clears. tp->lock is held.
4427 static int tg3_stop_block(struct tg3
*tp
, unsigned long ofs
, u32 enable_bit
, int silent
)
4432 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
4439 /* We can't enable/disable these bits of the
4440 * 5705/5750, just say success.
4453 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4456 if ((val
& enable_bit
) == 0)
4460 if (i
== MAX_WAIT_CNT
&& !silent
) {
4461 printk(KERN_ERR PFX
"tg3_stop_block timed out, "
4462 "ofs=%lx enable_bit=%x\n",
4470 /* tp->lock is held. */
4471 static int tg3_abort_hw(struct tg3
*tp
, int silent
)
4475 tg3_disable_ints(tp
);
4477 tp
->rx_mode
&= ~RX_MODE_ENABLE
;
4478 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
4481 err
= tg3_stop_block(tp
, RCVBDI_MODE
, RCVBDI_MODE_ENABLE
, silent
);
4482 err
|= tg3_stop_block(tp
, RCVLPC_MODE
, RCVLPC_MODE_ENABLE
, silent
);
4483 err
|= tg3_stop_block(tp
, RCVLSC_MODE
, RCVLSC_MODE_ENABLE
, silent
);
4484 err
|= tg3_stop_block(tp
, RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
, silent
);
4485 err
|= tg3_stop_block(tp
, RCVDCC_MODE
, RCVDCC_MODE_ENABLE
, silent
);
4486 err
|= tg3_stop_block(tp
, RCVCC_MODE
, RCVCC_MODE_ENABLE
, silent
);
4488 err
|= tg3_stop_block(tp
, SNDBDS_MODE
, SNDBDS_MODE_ENABLE
, silent
);
4489 err
|= tg3_stop_block(tp
, SNDBDI_MODE
, SNDBDI_MODE_ENABLE
, silent
);
4490 err
|= tg3_stop_block(tp
, SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
, silent
);
4491 err
|= tg3_stop_block(tp
, RDMAC_MODE
, RDMAC_MODE_ENABLE
, silent
);
4492 err
|= tg3_stop_block(tp
, SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
, silent
);
4493 err
|= tg3_stop_block(tp
, DMAC_MODE
, DMAC_MODE_ENABLE
, silent
);
4494 err
|= tg3_stop_block(tp
, SNDBDC_MODE
, SNDBDC_MODE_ENABLE
, silent
);
4496 tp
->mac_mode
&= ~MAC_MODE_TDE_ENABLE
;
4497 tw32_f(MAC_MODE
, tp
->mac_mode
);
4500 tp
->tx_mode
&= ~TX_MODE_ENABLE
;
4501 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
4503 for (i
= 0; i
< MAX_WAIT_CNT
; i
++) {
4505 if (!(tr32(MAC_TX_MODE
) & TX_MODE_ENABLE
))
4508 if (i
>= MAX_WAIT_CNT
) {
4509 printk(KERN_ERR PFX
"tg3_abort_hw timed out for %s, "
4510 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4511 tp
->dev
->name
, tr32(MAC_TX_MODE
));
4515 err
|= tg3_stop_block(tp
, HOSTCC_MODE
, HOSTCC_MODE_ENABLE
, silent
);
4516 err
|= tg3_stop_block(tp
, WDMAC_MODE
, WDMAC_MODE_ENABLE
, silent
);
4517 err
|= tg3_stop_block(tp
, MBFREE_MODE
, MBFREE_MODE_ENABLE
, silent
);
4519 tw32(FTQ_RESET
, 0xffffffff);
4520 tw32(FTQ_RESET
, 0x00000000);
4522 err
|= tg3_stop_block(tp
, BUFMGR_MODE
, BUFMGR_MODE_ENABLE
, silent
);
4523 err
|= tg3_stop_block(tp
, MEMARB_MODE
, MEMARB_MODE_ENABLE
, silent
);
4526 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
4528 memset(tp
->hw_stats
, 0, sizeof(struct tg3_hw_stats
));
4533 /* tp->lock is held. */
4534 static int tg3_nvram_lock(struct tg3
*tp
)
4536 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
4539 if (tp
->nvram_lock_cnt
== 0) {
4540 tw32(NVRAM_SWARB
, SWARB_REQ_SET1
);
4541 for (i
= 0; i
< 8000; i
++) {
4542 if (tr32(NVRAM_SWARB
) & SWARB_GNT1
)
4547 tw32(NVRAM_SWARB
, SWARB_REQ_CLR1
);
4551 tp
->nvram_lock_cnt
++;
4556 /* tp->lock is held. */
4557 static void tg3_nvram_unlock(struct tg3
*tp
)
4559 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
) {
4560 if (tp
->nvram_lock_cnt
> 0)
4561 tp
->nvram_lock_cnt
--;
4562 if (tp
->nvram_lock_cnt
== 0)
4563 tw32_f(NVRAM_SWARB
, SWARB_REQ_CLR1
);
4567 /* tp->lock is held. */
4568 static void tg3_enable_nvram_access(struct tg3
*tp
)
4570 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
4571 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
4572 u32 nvaccess
= tr32(NVRAM_ACCESS
);
4574 tw32(NVRAM_ACCESS
, nvaccess
| ACCESS_ENABLE
);
4578 /* tp->lock is held. */
4579 static void tg3_disable_nvram_access(struct tg3
*tp
)
4581 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
4582 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
)) {
4583 u32 nvaccess
= tr32(NVRAM_ACCESS
);
4585 tw32(NVRAM_ACCESS
, nvaccess
& ~ACCESS_ENABLE
);
4589 /* tp->lock is held. */
4590 static void tg3_write_sig_pre_reset(struct tg3
*tp
, int kind
)
4592 tg3_write_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
,
4593 NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
4595 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
4597 case RESET_KIND_INIT
:
4598 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4602 case RESET_KIND_SHUTDOWN
:
4603 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4607 case RESET_KIND_SUSPEND
:
4608 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4618 /* tp->lock is held. */
4619 static void tg3_write_sig_post_reset(struct tg3
*tp
, int kind
)
4621 if (tp
->tg3_flags2
& TG3_FLG2_ASF_NEW_HANDSHAKE
) {
4623 case RESET_KIND_INIT
:
4624 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4625 DRV_STATE_START_DONE
);
4628 case RESET_KIND_SHUTDOWN
:
4629 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4630 DRV_STATE_UNLOAD_DONE
);
4639 /* tp->lock is held. */
4640 static void tg3_write_sig_legacy(struct tg3
*tp
, int kind
)
4642 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
4644 case RESET_KIND_INIT
:
4645 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4649 case RESET_KIND_SHUTDOWN
:
4650 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4654 case RESET_KIND_SUSPEND
:
4655 tg3_write_mem(tp
, NIC_SRAM_FW_DRV_STATE_MBOX
,
4665 static int tg3_poll_fw(struct tg3
*tp
)
4670 /* Wait for firmware initialization to complete. */
4671 for (i
= 0; i
< 100000; i
++) {
4672 tg3_read_mem(tp
, NIC_SRAM_FIRMWARE_MBOX
, &val
);
4673 if (val
== ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
4678 /* Chip might not be fitted with firmware. Some Sun onboard
4679 * parts are configured like that. So don't signal the timeout
4680 * of the above loop as an error, but do report the lack of
4681 * running firmware once.
4684 !(tp
->tg3_flags2
& TG3_FLG2_NO_FWARE_REPORTED
)) {
4685 tp
->tg3_flags2
|= TG3_FLG2_NO_FWARE_REPORTED
;
4687 printk(KERN_INFO PFX
"%s: No firmware running.\n",
4694 static void tg3_stop_fw(struct tg3
*);
4696 /* tp->lock is held. */
4697 static int tg3_chip_reset(struct tg3
*tp
)
4700 void (*write_op
)(struct tg3
*, u32
, u32
);
4705 /* No matching tg3_nvram_unlock() after this because
4706 * chip reset below will undo the nvram lock.
4708 tp
->nvram_lock_cnt
= 0;
4710 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
4711 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
4712 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
4713 tw32(GRC_FASTBOOT_PC
, 0);
4716 * We must avoid the readl() that normally takes place.
4717 * It locks machines, causes machine checks, and other
4718 * fun things. So, temporarily disable the 5701
4719 * hardware workaround, while we do the reset.
4721 write_op
= tp
->write32
;
4722 if (write_op
== tg3_write_flush_reg32
)
4723 tp
->write32
= tg3_write32
;
4726 val
= GRC_MISC_CFG_CORECLK_RESET
;
4728 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
4729 if (tr32(0x7e2c) == 0x60) {
4732 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
4733 tw32(GRC_MISC_CFG
, (1 << 29));
4738 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
4739 val
|= GRC_MISC_CFG_KEEP_GPHY_POWER
;
4740 tw32(GRC_MISC_CFG
, val
);
4742 /* restore 5701 hardware bug workaround write method */
4743 tp
->write32
= write_op
;
4745 /* Unfortunately, we have to delay before the PCI read back.
4746 * Some 575X chips even will not respond to a PCI cfg access
4747 * when the reset command is given to the chip.
4749 * How do these hardware designers expect things to work
4750 * properly if the PCI write is posted for a long period
4751 * of time? It is always necessary to have some method by
4752 * which a register read back can occur to push the write
4753 * out which does the reset.
4755 * For most tg3 variants the trick below was working.
4760 /* Flush PCI posted writes. The normal MMIO registers
4761 * are inaccessible at this time so this is the only
4762 * way to make this reliably (actually, this is no longer
4763 * the case, see above). I tried to use indirect
4764 * register read/write but this upset some 5701 variants.
4766 pci_read_config_dword(tp
->pdev
, PCI_COMMAND
, &val
);
4770 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
4771 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
) {
4775 /* Wait for link training to complete. */
4776 for (i
= 0; i
< 5000; i
++)
4779 pci_read_config_dword(tp
->pdev
, 0xc4, &cfg_val
);
4780 pci_write_config_dword(tp
->pdev
, 0xc4,
4781 cfg_val
| (1 << 15));
4783 /* Set PCIE max payload size and clear error status. */
4784 pci_write_config_dword(tp
->pdev
, 0xd8, 0xf5000);
4787 /* Re-enable indirect register accesses. */
4788 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
4789 tp
->misc_host_ctrl
);
4791 /* Set MAX PCI retry to zero. */
4792 val
= (PCISTATE_ROM_ENABLE
| PCISTATE_ROM_RETRY_ENABLE
);
4793 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
4794 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
))
4795 val
|= PCISTATE_RETRY_SAME_DMA
;
4796 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, val
);
4798 pci_restore_state(tp
->pdev
);
4800 /* Make sure PCI-X relaxed ordering bit is clear. */
4801 pci_read_config_dword(tp
->pdev
, TG3PCI_X_CAPS
, &val
);
4802 val
&= ~PCIX_CAPS_RELAXED_ORDERING
;
4803 pci_write_config_dword(tp
->pdev
, TG3PCI_X_CAPS
, val
);
4805 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
) {
4808 /* Chip reset on 5780 will reset MSI enable bit,
4809 * so need to restore it.
4811 if (tp
->tg3_flags2
& TG3_FLG2_USING_MSI
) {
4814 pci_read_config_word(tp
->pdev
,
4815 tp
->msi_cap
+ PCI_MSI_FLAGS
,
4817 pci_write_config_word(tp
->pdev
,
4818 tp
->msi_cap
+ PCI_MSI_FLAGS
,
4819 ctrl
| PCI_MSI_FLAGS_ENABLE
);
4820 val
= tr32(MSGINT_MODE
);
4821 tw32(MSGINT_MODE
, val
| MSGINT_MODE_ENABLE
);
4824 val
= tr32(MEMARB_MODE
);
4825 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
4828 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
4830 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A3
) {
4832 tw32(0x5000, 0x400);
4835 tw32(GRC_MODE
, tp
->grc_mode
);
4837 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
) {
4838 u32 val
= tr32(0xc4);
4840 tw32(0xc4, val
| (1 << 15));
4843 if ((tp
->nic_sram_data_cfg
& NIC_SRAM_DATA_CFG_MINI_PCI
) != 0 &&
4844 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) {
4845 tp
->pci_clock_ctrl
|= CLOCK_CTRL_CLKRUN_OENABLE
;
4846 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
)
4847 tp
->pci_clock_ctrl
|= CLOCK_CTRL_FORCE_CLKRUN
;
4848 tw32(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
4851 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
4852 tp
->mac_mode
= MAC_MODE_PORT_MODE_TBI
;
4853 tw32_f(MAC_MODE
, tp
->mac_mode
);
4854 } else if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
4855 tp
->mac_mode
= MAC_MODE_PORT_MODE_GMII
;
4856 tw32_f(MAC_MODE
, tp
->mac_mode
);
4858 tw32_f(MAC_MODE
, 0);
4861 err
= tg3_poll_fw(tp
);
4865 if ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
4866 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
) {
4867 u32 val
= tr32(0x7c00);
4869 tw32(0x7c00, val
| (1 << 25));
4872 /* Reprobe ASF enable state. */
4873 tp
->tg3_flags
&= ~TG3_FLAG_ENABLE_ASF
;
4874 tp
->tg3_flags2
&= ~TG3_FLG2_ASF_NEW_HANDSHAKE
;
4875 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
4876 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
4879 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
4880 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
4881 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
4882 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
4883 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
4890 /* tp->lock is held. */
4891 static void tg3_stop_fw(struct tg3
*tp
)
4893 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
4897 tg3_write_mem(tp
, NIC_SRAM_FW_CMD_MBOX
, FWCMD_NICDRV_PAUSE_FW
);
4898 val
= tr32(GRC_RX_CPU_EVENT
);
4900 tw32(GRC_RX_CPU_EVENT
, val
);
4902 /* Wait for RX cpu to ACK the event. */
4903 for (i
= 0; i
< 100; i
++) {
4904 if (!(tr32(GRC_RX_CPU_EVENT
) & (1 << 14)))
/* tp->lock is held.
 *
 * Orderly halt: pause firmware, signal pre-reset, quiesce the hardware,
 * reset the chip, then emit the post-reset firmware signals.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	if (err)
		return err;

	return 0;
}
4932 #define TG3_FW_RELEASE_MAJOR 0x0
4933 #define TG3_FW_RELASE_MINOR 0x0
4934 #define TG3_FW_RELEASE_FIX 0x0
4935 #define TG3_FW_START_ADDR 0x08000000
4936 #define TG3_FW_TEXT_ADDR 0x08000000
4937 #define TG3_FW_TEXT_LEN 0x9c0
4938 #define TG3_FW_RODATA_ADDR 0x080009c0
4939 #define TG3_FW_RODATA_LEN 0x60
4940 #define TG3_FW_DATA_ADDR 0x08000a40
4941 #define TG3_FW_DATA_LEN 0x20
4942 #define TG3_FW_SBSS_ADDR 0x08000a60
4943 #define TG3_FW_SBSS_LEN 0xc
4944 #define TG3_FW_BSS_ADDR 0x08000a70
4945 #define TG3_FW_BSS_LEN 0x10
4947 static const u32 tg3FwText
[(TG3_FW_TEXT_LEN
/ sizeof(u32
)) + 1] = {
4948 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4949 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4950 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4951 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4952 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4953 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4954 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4955 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4956 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4957 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4958 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4959 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4960 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4961 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4962 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4963 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4964 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4965 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4966 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4967 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4968 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4969 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4970 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4971 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4972 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4974 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4975 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4976 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4977 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4978 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4979 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4980 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4981 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4982 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4983 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4984 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4985 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4986 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4987 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4988 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4989 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4990 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4991 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4992 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4993 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4994 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4995 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4996 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4997 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4998 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4999 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5000 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5001 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5002 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5003 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5004 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5005 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5006 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5007 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5008 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5009 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5010 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5011 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5012 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5013 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5014 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5015 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5016 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5017 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5018 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5019 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5020 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5021 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5022 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5023 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5024 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5025 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5026 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5027 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5028 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5029 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5030 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5031 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5032 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5033 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5034 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5035 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5036 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5037 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5038 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5041 static const u32 tg3FwRodata
[(TG3_FW_RODATA_LEN
/ sizeof(u32
)) + 1] = {
5042 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5043 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5044 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5045 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
#if 0 /* All zeros, don't eat up space with it. */
/* Initialized-data segment of the 5701_a0 firmware image.  It is all
 * zeros, so the loader passes a NULL data pointer instead (see
 * tg3_load_firmware_cpu's "fill with zeros" path) and this table is
 * compiled out.
 * NOTE(review): closing brace and #endif restored from upstream — verify.
 */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
/* On-chip scratch memory windows used to stage firmware images for
 * the embedded RX and TX CPUs (16 KiB each).
 */
#define RX_CPU_SCRATCH_BASE	0x30000
#define RX_CPU_SCRATCH_SIZE	0x04000
#define TX_CPU_SCRATCH_BASE	0x34000
#define TX_CPU_SCRATCH_SIZE	0x04000
5061 /* tp->lock is held. */
5062 static int tg3_halt_cpu(struct tg3
*tp
, u32 offset
)
5066 BUG_ON(offset
== TX_CPU_BASE
&&
5067 (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
));
5069 if (offset
== RX_CPU_BASE
) {
5070 for (i
= 0; i
< 10000; i
++) {
5071 tw32(offset
+ CPU_STATE
, 0xffffffff);
5072 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
5073 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
5077 tw32(offset
+ CPU_STATE
, 0xffffffff);
5078 tw32_f(offset
+ CPU_MODE
, CPU_MODE_HALT
);
5081 for (i
= 0; i
< 10000; i
++) {
5082 tw32(offset
+ CPU_STATE
, 0xffffffff);
5083 tw32(offset
+ CPU_MODE
, CPU_MODE_HALT
);
5084 if (tr32(offset
+ CPU_MODE
) & CPU_MODE_HALT
)
5090 printk(KERN_ERR PFX
"tg3_reset_cpu timed out for %s, "
5093 (offset
== RX_CPU_BASE
? "RX" : "TX"));
5097 /* Clear firmware's nvram arbitration. */
5098 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
5099 tw32(NVRAM_SWARB
, SWARB_REQ_CLR0
);
5104 unsigned int text_base
;
5105 unsigned int text_len
;
5106 const u32
*text_data
;
5107 unsigned int rodata_base
;
5108 unsigned int rodata_len
;
5109 const u32
*rodata_data
;
5110 unsigned int data_base
;
5111 unsigned int data_len
;
5112 const u32
*data_data
;
5115 /* tp->lock is held. */
5116 static int tg3_load_firmware_cpu(struct tg3
*tp
, u32 cpu_base
, u32 cpu_scratch_base
,
5117 int cpu_scratch_size
, struct fw_info
*info
)
5119 int err
, lock_err
, i
;
5120 void (*write_op
)(struct tg3
*, u32
, u32
);
5122 if (cpu_base
== TX_CPU_BASE
&&
5123 (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
5124 printk(KERN_ERR PFX
"tg3_load_firmware_cpu: Trying to load "
5125 "TX cpu firmware on %s which is 5705.\n",
5130 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
5131 write_op
= tg3_write_mem
;
5133 write_op
= tg3_write_indirect_reg32
;
5135 /* It is possible that bootcode is still loading at this point.
5136 * Get the nvram lock first before halting the cpu.
5138 lock_err
= tg3_nvram_lock(tp
);
5139 err
= tg3_halt_cpu(tp
, cpu_base
);
5141 tg3_nvram_unlock(tp
);
5145 for (i
= 0; i
< cpu_scratch_size
; i
+= sizeof(u32
))
5146 write_op(tp
, cpu_scratch_base
+ i
, 0);
5147 tw32(cpu_base
+ CPU_STATE
, 0xffffffff);
5148 tw32(cpu_base
+ CPU_MODE
, tr32(cpu_base
+CPU_MODE
)|CPU_MODE_HALT
);
5149 for (i
= 0; i
< (info
->text_len
/ sizeof(u32
)); i
++)
5150 write_op(tp
, (cpu_scratch_base
+
5151 (info
->text_base
& 0xffff) +
5154 info
->text_data
[i
] : 0));
5155 for (i
= 0; i
< (info
->rodata_len
/ sizeof(u32
)); i
++)
5156 write_op(tp
, (cpu_scratch_base
+
5157 (info
->rodata_base
& 0xffff) +
5159 (info
->rodata_data
?
5160 info
->rodata_data
[i
] : 0));
5161 for (i
= 0; i
< (info
->data_len
/ sizeof(u32
)); i
++)
5162 write_op(tp
, (cpu_scratch_base
+
5163 (info
->data_base
& 0xffff) +
5166 info
->data_data
[i
] : 0));
5174 /* tp->lock is held. */
5175 static int tg3_load_5701_a0_firmware_fix(struct tg3
*tp
)
5177 struct fw_info info
;
5180 info
.text_base
= TG3_FW_TEXT_ADDR
;
5181 info
.text_len
= TG3_FW_TEXT_LEN
;
5182 info
.text_data
= &tg3FwText
[0];
5183 info
.rodata_base
= TG3_FW_RODATA_ADDR
;
5184 info
.rodata_len
= TG3_FW_RODATA_LEN
;
5185 info
.rodata_data
= &tg3FwRodata
[0];
5186 info
.data_base
= TG3_FW_DATA_ADDR
;
5187 info
.data_len
= TG3_FW_DATA_LEN
;
5188 info
.data_data
= NULL
;
5190 err
= tg3_load_firmware_cpu(tp
, RX_CPU_BASE
,
5191 RX_CPU_SCRATCH_BASE
, RX_CPU_SCRATCH_SIZE
,
5196 err
= tg3_load_firmware_cpu(tp
, TX_CPU_BASE
,
5197 TX_CPU_SCRATCH_BASE
, TX_CPU_SCRATCH_SIZE
,
5202 /* Now startup only the RX cpu. */
5203 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
5204 tw32_f(RX_CPU_BASE
+ CPU_PC
, TG3_FW_TEXT_ADDR
);
5206 for (i
= 0; i
< 5; i
++) {
5207 if (tr32(RX_CPU_BASE
+ CPU_PC
) == TG3_FW_TEXT_ADDR
)
5209 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
5210 tw32(RX_CPU_BASE
+ CPU_MODE
, CPU_MODE_HALT
);
5211 tw32_f(RX_CPU_BASE
+ CPU_PC
, TG3_FW_TEXT_ADDR
);
5215 printk(KERN_ERR PFX
"tg3_load_firmware fails for %s "
5216 "to set RX CPU PC, is %08x should be %08x\n",
5217 tp
->dev
->name
, tr32(RX_CPU_BASE
+ CPU_PC
),
5221 tw32(RX_CPU_BASE
+ CPU_STATE
, 0xffffffff);
5222 tw32_f(RX_CPU_BASE
+ CPU_MODE
, 0x00000000);
5227 #if TG3_TSO_SUPPORT != 0
5229 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
5230 #define TG3_TSO_FW_RELASE_MINOR 0x6
5231 #define TG3_TSO_FW_RELEASE_FIX 0x0
5232 #define TG3_TSO_FW_START_ADDR 0x08000000
5233 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5234 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5235 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5236 #define TG3_TSO_FW_RODATA_LEN 0x60
5237 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5238 #define TG3_TSO_FW_DATA_LEN 0x30
5239 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5240 #define TG3_TSO_FW_SBSS_LEN 0x2c
5241 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5242 #define TG3_TSO_FW_BSS_LEN 0x894
5244 static const u32 tg3TsoFwText
[(TG3_TSO_FW_TEXT_LEN
/ 4) + 1] = {
5245 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5246 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5247 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5248 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5249 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5250 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5251 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5252 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5253 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5254 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5255 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5256 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5257 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5258 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5259 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5260 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5261 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5262 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5263 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5264 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5265 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5266 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5267 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5268 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5269 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5270 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5271 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5272 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5273 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5274 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5275 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5276 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5277 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5278 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5279 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5280 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5281 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5282 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5283 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5284 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5285 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5286 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5287 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5288 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5289 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5290 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5291 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5292 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5293 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5294 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5295 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5296 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5297 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5298 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5299 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5300 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5301 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5302 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5303 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5304 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5305 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5306 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5307 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5308 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5309 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5310 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5311 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5312 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5313 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5314 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5315 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5316 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5317 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5318 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5319 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5320 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5321 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5322 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5323 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5324 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5325 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5326 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5327 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5328 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5329 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5330 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5331 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5332 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5333 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5334 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5335 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5336 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5337 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5338 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5339 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5340 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5341 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5342 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5343 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5344 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5345 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5346 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5347 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5348 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5349 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5350 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5351 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5352 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5353 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5354 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5355 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5356 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5357 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5358 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5359 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5360 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5361 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5362 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5363 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5364 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5365 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5366 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5367 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5368 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5369 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5370 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5371 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5372 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5373 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5374 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5375 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5376 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5377 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5378 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5379 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5380 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5381 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5382 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5383 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5384 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5385 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5386 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5387 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5388 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5389 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5390 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5391 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5392 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5393 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5394 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5395 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5396 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5397 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5398 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5399 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5400 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5401 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5402 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5403 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5404 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5405 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5406 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5407 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5408 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5409 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5410 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5411 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5412 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5413 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5414 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5415 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5416 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5417 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5418 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5419 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5420 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5421 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5422 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5423 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5424 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5425 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5426 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5427 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5428 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5429 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5430 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5431 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5432 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5433 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5434 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5435 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5436 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5437 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5438 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5439 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5440 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5441 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5442 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5443 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5444 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5445 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5446 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5447 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5448 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5449 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5450 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5451 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5452 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5453 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5454 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5455 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5456 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5457 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5458 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5459 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5460 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5461 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5462 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5463 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5464 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5465 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5466 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5467 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5468 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5469 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5470 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5471 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5472 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5473 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5474 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5475 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5476 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5477 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5478 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5479 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5480 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5481 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5482 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5483 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5484 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5485 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5486 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5487 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5488 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5489 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5490 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5491 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5492 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5493 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5494 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5495 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5496 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5497 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5498 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5499 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5500 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5501 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5502 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5503 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5504 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5505 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5506 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5507 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5508 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5509 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5510 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5511 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5512 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5513 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5514 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5515 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5516 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5517 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5518 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5519 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5520 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5521 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5522 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5523 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5524 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5525 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5526 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5527 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5528 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5531 static const u32 tg3TsoFwRodata
[] = {
5532 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5533 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5534 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5535 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5539 static const u32 tg3TsoFwData
[] = {
5540 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5541 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
/* 5705 needs a special version of the TSO firmware. */
/* Layout of the 5705-specific TSO firmware image (segment addresses
 * and byte lengths).  Note: "RELASE" in the MINOR macro is a
 * long-standing typo preserved here because the name may be
 * referenced elsewhere.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
#define TG3_TSO5_FW_RELASE_MINOR	0x2
#define TG3_TSO5_FW_RELEASE_FIX		0x0
#define TG3_TSO5_FW_START_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
#define TG3_TSO5_FW_TEXT_LEN		0xe90
#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
#define TG3_TSO5_FW_RODATA_LEN		0x50
#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
#define TG3_TSO5_FW_DATA_LEN		0x20
#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
#define TG3_TSO5_FW_SBSS_LEN		0x28
#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
#define TG3_TSO5_FW_BSS_LEN		0x88
/* Text (code) segment of the 5705-specific TSO firmware: MIPS machine
 * code executed by the on-chip RX CPU.  Loaded into NIC SRAM at
 * TG3_TSO5_FW_TEXT_ADDR by tg3_load_tso_firmware().  The array holds
 * (TG3_TSO5_FW_TEXT_LEN / 4) + 1 words.  Opaque firmware payload —
 * do not edit by hand.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
/* Read-only data segment of the 5705 TSO firmware (ASCII string
 * constants: "MainCpuB", "MainCpuA", "stkoffld", "fatalErr").  Loaded
 * into NIC SRAM at TG3_TSO5_FW_RODATA_ADDR.  Opaque firmware payload —
 * do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
/* Initialized data segment of the 5705 TSO firmware (contains the
 * version string "stkoffld_v1.2.0").  Loaded into NIC SRAM at
 * TG3_TSO5_FW_DATA_ADDR.  Opaque firmware payload — do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
/* tp->lock is held. */
/* Download the software TSO firmware into one of the on-chip MIPS CPUs
 * and start it running.  Chips with TSO in hardware (TG3_FLG2_HW_TSO)
 * skip the download entirely.  The 5705 uses a special firmware image
 * run on the RX CPU (scratch space carved out of the mbuf pool); all
 * other TSO-capable chips run the generic image on the TX CPU.
 *
 * Returns 0 on success, a negative errno if the firmware could not be
 * loaded or the CPU refused to start at the firmware entry point.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware-TSO chips need no firmware download. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* 5705: special firmware image, run on the RX CPU. */
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch area must cover every firmware segment. */
		cpu_scratch_size = (info.text_len +
				    TG3_TSO5_FW_RODATA_LEN +
				    TG3_TSO5_FW_DATA_LEN +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		/* Generic image, run on the TX CPU. */
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Retry a few times: halt the CPU, reset its PC to the firmware
	 * entry point, and check that the write took effect.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}

	/* Clear pending state and release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5804 #endif /* TG3_TSO_SUPPORT != 0 */
/* tp->lock is held. */
/* Program the current dev->dev_addr into the MAC address registers.
 * The address is written into all four perfect-match slots; on 5703
 * and 5704 the twelve extended-address slots are filled with the same
 * address as well.  Finally the TX backoff seed is derived from the
 * byte sum of the address so different NICs pick different backoff
 * slots.
 */
static void __tg3_set_mac_addr(struct tg3 *tp)
{
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte address into the hardware's high:2/low:4 layout. */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* These chips have 12 additional perfect-match slots. */
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the transmit backoff engine from the MAC address bytes
	 * (addr_high is reused as a scratch variable here).
	 */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
/* net_device set_mac_address hook.  Validates and copies the new
 * address into dev->dev_addr, then programs it into the hardware if
 * the interface is up.  When ASF management firmware is enabled the
 * chip is halted and fully restarted so ASF can re-register its own
 * addresses; otherwise a simple register update under tp->lock
 * suffices.
 *
 * @p points to a struct sockaddr carrying the new address.
 * Returns 0 on success, -EINVAL for an invalid ethernet address, or
 * the error from tg3_restart_hw().
 */
static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	/* Interface down: the address takes effect at next open. */
	if (!netif_running(dev))
		return 0;

	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		/* Reset chip so that ASF can re-init any MAC addresses it
		 * needs.
		 */
		tg3_full_lock(tp, 1);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 0);
		if (!err)
			tg3_netif_start(tp);
		tg3_full_unlock(tp);
	} else {
		spin_lock_bh(&tp->lock);
		__tg3_set_mac_addr(tp);
		spin_unlock_bh(&tp->lock);
	}

	return err;
}
/* tp->lock is held. */
/* Fill in a TG3_BDINFO descriptor-ring control block in NIC SRAM:
 * the 64-bit host DMA address of the ring (high then low word), the
 * maxlen/flags word, and — on chips that have on-chip ring copies
 * (pre-5705) — the NIC SRAM address of the descriptors.
 */
static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
			   dma_addr_t mapping, u32 maxlen_flags,
			   u32 nic_addr)
{
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
		      ((u64) mapping >> 32));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
		      ((u64) mapping & 0xffffffff));
	tg3_write_mem(tp,
		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
		       maxlen_flags);

	/* 5705+ chips have no NIC-resident descriptor copies. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		tg3_write_mem(tp,
			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
			      nic_addr);
}
5897 static void __tg3_set_rx_mode(struct net_device
*);
/* Program the host coalescing engine from an ethtool_coalesce request.
 * The usecs/max-frames thresholds exist on all chips; the "during irq"
 * tick registers and the statistics-block tick register only exist on
 * pre-5705 chips, so those writes are gated on !TG3_FLG2_5705_PLUS.
 * Statistics coalescing is forced off while the link is down.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
	}

	/* The irq max-frames registers exist on all chips. */
	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		/* No point coalescing stats DMA with the link down. */
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}
}
5920 /* tp->lock is held. */
5921 static int tg3_reset_hw(struct tg3
*tp
, int reset_phy
)
5923 u32 val
, rdmac_mode
;
5926 tg3_disable_ints(tp
);
5930 tg3_write_sig_pre_reset(tp
, RESET_KIND_INIT
);
5932 if (tp
->tg3_flags
& TG3_FLAG_INIT_COMPLETE
) {
5933 tg3_abort_hw(tp
, 1);
5936 if ((tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) && reset_phy
)
5939 err
= tg3_chip_reset(tp
);
5943 tg3_write_sig_legacy(tp
, RESET_KIND_INIT
);
5945 /* This works around an issue with Athlon chipsets on
5946 * B3 tigon3 silicon. This bit has no effect on any
5947 * other revision. But do not set this on PCI Express
5950 if (!(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
5951 tp
->pci_clock_ctrl
|= CLOCK_CTRL_DELAY_PCI_GRANT
;
5952 tw32_f(TG3PCI_CLOCK_CTRL
, tp
->pci_clock_ctrl
);
5954 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
&&
5955 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
5956 val
= tr32(TG3PCI_PCISTATE
);
5957 val
|= PCISTATE_RETRY_SAME_DMA
;
5958 tw32(TG3PCI_PCISTATE
, val
);
5961 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_BX
) {
5962 /* Enable some hw fixes. */
5963 val
= tr32(TG3PCI_MSI_DATA
);
5964 val
|= (1 << 26) | (1 << 28) | (1 << 29);
5965 tw32(TG3PCI_MSI_DATA
, val
);
5968 /* Descriptor ring init may make accesses to the
5969 * NIC SRAM area to setup the TX descriptors, so we
5970 * can only do this after the hardware has been
5971 * successfully reset.
5973 err
= tg3_init_rings(tp
);
5977 /* This value is determined during the probe time DMA
5978 * engine test, tg3_test_dma.
5980 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
5982 tp
->grc_mode
&= ~(GRC_MODE_HOST_SENDBDS
|
5983 GRC_MODE_4X_NIC_SEND_RINGS
|
5984 GRC_MODE_NO_TX_PHDR_CSUM
|
5985 GRC_MODE_NO_RX_PHDR_CSUM
);
5986 tp
->grc_mode
|= GRC_MODE_HOST_SENDBDS
;
5988 /* Pseudo-header checksum is done by hardware logic and not
5989 * the offload processers, so make the chip do the pseudo-
5990 * header checksums on receive. For transmit it is more
5991 * convenient to do the pseudo-header checksum in software
5992 * as Linux does that on transmit for us in all cases.
5994 tp
->grc_mode
|= GRC_MODE_NO_TX_PHDR_CSUM
;
5998 (GRC_MODE_IRQ_ON_MAC_ATTN
| GRC_MODE_HOST_STACKUP
));
6000 /* Setup the timer prescalar register. Clock is always 66Mhz. */
6001 val
= tr32(GRC_MISC_CFG
);
6003 val
|= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT
);
6004 tw32(GRC_MISC_CFG
, val
);
6006 /* Initialize MBUF/DESC pool. */
6007 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
6009 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
) {
6010 tw32(BUFMGR_MB_POOL_ADDR
, NIC_SRAM_MBUF_POOL_BASE
);
6011 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
6012 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE64
);
6014 tw32(BUFMGR_MB_POOL_SIZE
, NIC_SRAM_MBUF_POOL_SIZE96
);
6015 tw32(BUFMGR_DMA_DESC_POOL_ADDR
, NIC_SRAM_DMA_DESC_POOL_BASE
);
6016 tw32(BUFMGR_DMA_DESC_POOL_SIZE
, NIC_SRAM_DMA_DESC_POOL_SIZE
);
6018 #if TG3_TSO_SUPPORT != 0
6019 else if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
6022 fw_len
= (TG3_TSO5_FW_TEXT_LEN
+
6023 TG3_TSO5_FW_RODATA_LEN
+
6024 TG3_TSO5_FW_DATA_LEN
+
6025 TG3_TSO5_FW_SBSS_LEN
+
6026 TG3_TSO5_FW_BSS_LEN
);
6027 fw_len
= (fw_len
+ (0x80 - 1)) & ~(0x80 - 1);
6028 tw32(BUFMGR_MB_POOL_ADDR
,
6029 NIC_SRAM_MBUF_POOL_BASE5705
+ fw_len
);
6030 tw32(BUFMGR_MB_POOL_SIZE
,
6031 NIC_SRAM_MBUF_POOL_SIZE5705
- fw_len
- 0xa00);
6035 if (tp
->dev
->mtu
<= ETH_DATA_LEN
) {
6036 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
6037 tp
->bufmgr_config
.mbuf_read_dma_low_water
);
6038 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
6039 tp
->bufmgr_config
.mbuf_mac_rx_low_water
);
6040 tw32(BUFMGR_MB_HIGH_WATER
,
6041 tp
->bufmgr_config
.mbuf_high_water
);
6043 tw32(BUFMGR_MB_RDMA_LOW_WATER
,
6044 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
);
6045 tw32(BUFMGR_MB_MACRX_LOW_WATER
,
6046 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
);
6047 tw32(BUFMGR_MB_HIGH_WATER
,
6048 tp
->bufmgr_config
.mbuf_high_water_jumbo
);
6050 tw32(BUFMGR_DMA_LOW_WATER
,
6051 tp
->bufmgr_config
.dma_low_water
);
6052 tw32(BUFMGR_DMA_HIGH_WATER
,
6053 tp
->bufmgr_config
.dma_high_water
);
6055 tw32(BUFMGR_MODE
, BUFMGR_MODE_ENABLE
| BUFMGR_MODE_ATTN_ENABLE
);
6056 for (i
= 0; i
< 2000; i
++) {
6057 if (tr32(BUFMGR_MODE
) & BUFMGR_MODE_ENABLE
)
6062 printk(KERN_ERR PFX
"tg3_reset_hw cannot enable BUFMGR for %s.\n",
6067 /* Setup replenish threshold. */
6068 val
= tp
->rx_pending
/ 8;
6071 else if (val
> tp
->rx_std_max_post
)
6072 val
= tp
->rx_std_max_post
;
6074 tw32(RCVBDI_STD_THRESH
, val
);
6076 /* Initialize TG3_BDINFO's at:
6077 * RCVDBDI_STD_BD: standard eth size rx ring
6078 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6079 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6082 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6083 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6084 * ring attribute flags
6085 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6087 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6088 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6090 * The size of each ring is fixed in the firmware, but the location is
6093 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6094 ((u64
) tp
->rx_std_mapping
>> 32));
6095 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6096 ((u64
) tp
->rx_std_mapping
& 0xffffffff));
6097 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_NIC_ADDR
,
6098 NIC_SRAM_RX_BUFFER_DESC
);
6100 /* Don't even try to program the JUMBO/MINI buffer descriptor
6103 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
6104 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6105 RX_STD_MAX_SIZE_5705
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6107 tw32(RCVDBDI_STD_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6108 RX_STD_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6110 tw32(RCVDBDI_MINI_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6111 BDINFO_FLAGS_DISABLED
);
6113 /* Setup replenish threshold. */
6114 tw32(RCVBDI_JUMBO_THRESH
, tp
->rx_jumbo_pending
/ 8);
6116 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) {
6117 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6118 ((u64
) tp
->rx_jumbo_mapping
>> 32));
6119 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6120 ((u64
) tp
->rx_jumbo_mapping
& 0xffffffff));
6121 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6122 RX_JUMBO_MAX_SIZE
<< BDINFO_FLAGS_MAXLEN_SHIFT
);
6123 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_NIC_ADDR
,
6124 NIC_SRAM_RX_JUMBO_BUFFER_DESC
);
6126 tw32(RCVDBDI_JUMBO_BD
+ TG3_BDINFO_MAXLEN_FLAGS
,
6127 BDINFO_FLAGS_DISABLED
);
6132 /* There is only one send ring on 5705/5750, no need to explicitly
6133 * disable the others.
6135 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6136 /* Clear out send RCB ring in SRAM. */
6137 for (i
= NIC_SRAM_SEND_RCB
; i
< NIC_SRAM_RCV_RET_RCB
; i
+= TG3_BDINFO_SIZE
)
6138 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
6139 BDINFO_FLAGS_DISABLED
);
6144 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6145 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6147 tg3_set_bdinfo(tp
, NIC_SRAM_SEND_RCB
,
6148 tp
->tx_desc_mapping
,
6149 (TG3_TX_RING_SIZE
<<
6150 BDINFO_FLAGS_MAXLEN_SHIFT
),
6151 NIC_SRAM_TX_BUFFER_DESC
);
6153 /* There is only one receive return ring on 5705/5750, no need
6154 * to explicitly disable the others.
6156 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6157 for (i
= NIC_SRAM_RCV_RET_RCB
; i
< NIC_SRAM_STATS_BLK
;
6158 i
+= TG3_BDINFO_SIZE
) {
6159 tg3_write_mem(tp
, i
+ TG3_BDINFO_MAXLEN_FLAGS
,
6160 BDINFO_FLAGS_DISABLED
);
6165 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0
+ TG3_64BIT_REG_LOW
, 0);
6167 tg3_set_bdinfo(tp
, NIC_SRAM_RCV_RET_RCB
,
6169 (TG3_RX_RCB_RING_SIZE(tp
) <<
6170 BDINFO_FLAGS_MAXLEN_SHIFT
),
6173 tp
->rx_std_ptr
= tp
->rx_pending
;
6174 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX
+ TG3_64BIT_REG_LOW
,
6177 tp
->rx_jumbo_ptr
= (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
) ?
6178 tp
->rx_jumbo_pending
: 0;
6179 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX
+ TG3_64BIT_REG_LOW
,
6182 /* Initialize MAC address and backoff seed. */
6183 __tg3_set_mac_addr(tp
);
6185 /* MTU + ethernet header + FCS + optional VLAN tag */
6186 tw32(MAC_RX_MTU_SIZE
, tp
->dev
->mtu
+ ETH_HLEN
+ 8);
6188 /* The slot time is changed by tg3_setup_phy if we
6189 * run at gigabit with half duplex.
6191 tw32(MAC_TX_LENGTHS
,
6192 (2 << TX_LENGTHS_IPG_CRS_SHIFT
) |
6193 (6 << TX_LENGTHS_IPG_SHIFT
) |
6194 (32 << TX_LENGTHS_SLOT_TIME_SHIFT
));
6196 /* Receive rules. */
6197 tw32(MAC_RCV_RULE_CFG
, RCV_RULE_CFG_DEFAULT_CLASS
);
6198 tw32(RCVLPC_CONFIG
, 0x0181);
6200 /* Calculate RDMAC_MODE setting early, we need it to determine
6201 * the RCVLPC_STATE_ENABLE mask.
6203 rdmac_mode
= (RDMAC_MODE_ENABLE
| RDMAC_MODE_TGTABORT_ENAB
|
6204 RDMAC_MODE_MSTABORT_ENAB
| RDMAC_MODE_PARITYERR_ENAB
|
6205 RDMAC_MODE_ADDROFLOW_ENAB
| RDMAC_MODE_FIFOOFLOW_ENAB
|
6206 RDMAC_MODE_FIFOURUN_ENAB
| RDMAC_MODE_FIFOOREAD_ENAB
|
6207 RDMAC_MODE_LNGREAD_ENAB
);
6208 if (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
)
6209 rdmac_mode
|= RDMAC_MODE_SPLIT_ENABLE
;
6211 /* If statement applies to 5705 and 5750 PCI devices only */
6212 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
6213 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
6214 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)) {
6215 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
&&
6216 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
6217 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
6218 rdmac_mode
|= RDMAC_MODE_FIFO_SIZE_128
;
6219 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
6220 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
)) {
6221 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
6225 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)
6226 rdmac_mode
|= RDMAC_MODE_FIFO_LONG_BURST
;
6228 #if TG3_TSO_SUPPORT != 0
6229 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6230 rdmac_mode
|= (1 << 27);
6233 /* Receive/send statistics. */
6234 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
6235 val
= tr32(RCVLPC_STATS_ENABLE
);
6236 val
&= ~RCVLPC_STATSENAB_DACK_FIX
;
6237 tw32(RCVLPC_STATS_ENABLE
, val
);
6238 } else if ((rdmac_mode
& RDMAC_MODE_FIFO_SIZE_128
) &&
6239 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
6240 val
= tr32(RCVLPC_STATS_ENABLE
);
6241 val
&= ~RCVLPC_STATSENAB_LNGBRST_RFIX
;
6242 tw32(RCVLPC_STATS_ENABLE
, val
);
6244 tw32(RCVLPC_STATS_ENABLE
, 0xffffff);
6246 tw32(RCVLPC_STATSCTRL
, RCVLPC_STATSCTRL_ENABLE
);
6247 tw32(SNDDATAI_STATSENAB
, 0xffffff);
6248 tw32(SNDDATAI_STATSCTRL
,
6249 (SNDDATAI_SCTRL_ENABLE
|
6250 SNDDATAI_SCTRL_FASTUPD
));
6252 /* Setup host coalescing engine. */
6253 tw32(HOSTCC_MODE
, 0);
6254 for (i
= 0; i
< 2000; i
++) {
6255 if (!(tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
))
6260 __tg3_set_coalesce(tp
, &tp
->coal
);
6262 /* set status block DMA address */
6263 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6264 ((u64
) tp
->status_mapping
>> 32));
6265 tw32(HOSTCC_STATUS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6266 ((u64
) tp
->status_mapping
& 0xffffffff));
6268 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6269 /* Status/statistics block address. See tg3_timer,
6270 * the tg3_periodic_fetch_stats call there, and
6271 * tg3_get_stats to see how this works for 5705/5750 chips.
6273 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_HIGH
,
6274 ((u64
) tp
->stats_mapping
>> 32));
6275 tw32(HOSTCC_STATS_BLK_HOST_ADDR
+ TG3_64BIT_REG_LOW
,
6276 ((u64
) tp
->stats_mapping
& 0xffffffff));
6277 tw32(HOSTCC_STATS_BLK_NIC_ADDR
, NIC_SRAM_STATS_BLK
);
6278 tw32(HOSTCC_STATUS_BLK_NIC_ADDR
, NIC_SRAM_STATUS_BLK
);
6281 tw32(HOSTCC_MODE
, HOSTCC_MODE_ENABLE
| tp
->coalesce_mode
);
6283 tw32(RCVCC_MODE
, RCVCC_MODE_ENABLE
| RCVCC_MODE_ATTN_ENABLE
);
6284 tw32(RCVLPC_MODE
, RCVLPC_MODE_ENABLE
);
6285 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
6286 tw32(RCVLSC_MODE
, RCVLSC_MODE_ENABLE
| RCVLSC_MODE_ATTN_ENABLE
);
6288 /* Clear statistics/status block in chip, and status block in ram. */
6289 for (i
= NIC_SRAM_STATS_BLK
;
6290 i
< NIC_SRAM_STATUS_BLK
+ TG3_HW_STATUS_SIZE
;
6292 tg3_write_mem(tp
, i
, 0);
6295 memset(tp
->hw_status
, 0, TG3_HW_STATUS_SIZE
);
6297 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
6298 tp
->tg3_flags2
&= ~TG3_FLG2_PARALLEL_DETECT
;
6299 /* reset to prevent losing 1st rx packet intermittently */
6300 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
6304 tp
->mac_mode
= MAC_MODE_TXSTAT_ENABLE
| MAC_MODE_RXSTAT_ENABLE
|
6305 MAC_MODE_TDE_ENABLE
| MAC_MODE_RDE_ENABLE
| MAC_MODE_FHDE_ENABLE
;
6306 tw32_f(MAC_MODE
, tp
->mac_mode
| MAC_MODE_RXSTAT_CLEAR
| MAC_MODE_TXSTAT_CLEAR
);
6309 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6310 * If TG3_FLAG_EEPROM_WRITE_PROT is set, we should read the
6311 * register to preserve the GPIO settings for LOMs. The GPIOs,
6312 * whether used as inputs or outputs, are set by boot code after
6315 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
6318 gpio_mask
= GRC_LCLCTRL_GPIO_OE0
| GRC_LCLCTRL_GPIO_OE2
|
6319 GRC_LCLCTRL_GPIO_OUTPUT0
| GRC_LCLCTRL_GPIO_OUTPUT2
;
6321 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
6322 gpio_mask
|= GRC_LCLCTRL_GPIO_OE3
|
6323 GRC_LCLCTRL_GPIO_OUTPUT3
;
6325 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
6326 gpio_mask
|= GRC_LCLCTRL_GPIO_UART_SEL
;
6328 tp
->grc_local_ctrl
|= tr32(GRC_LOCAL_CTRL
) & gpio_mask
;
6330 /* GPIO1 must be driven high for eeprom write protect */
6331 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
6332 GRC_LCLCTRL_GPIO_OUTPUT1
);
6334 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
6337 tw32_mailbox_f(MAILBOX_INTERRUPT_0
+ TG3_64BIT_REG_LOW
, 0);
6340 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
6341 tw32_f(DMAC_MODE
, DMAC_MODE_ENABLE
);
6345 val
= (WDMAC_MODE_ENABLE
| WDMAC_MODE_TGTABORT_ENAB
|
6346 WDMAC_MODE_MSTABORT_ENAB
| WDMAC_MODE_PARITYERR_ENAB
|
6347 WDMAC_MODE_ADDROFLOW_ENAB
| WDMAC_MODE_FIFOOFLOW_ENAB
|
6348 WDMAC_MODE_FIFOURUN_ENAB
| WDMAC_MODE_FIFOOREAD_ENAB
|
6349 WDMAC_MODE_LNGREAD_ENAB
);
6351 /* If statement applies to 5705 and 5750 PCI devices only */
6352 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
6353 tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) ||
6354 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) {
6355 if ((tp
->tg3_flags
& TG3_FLG2_TSO_CAPABLE
) &&
6356 (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
||
6357 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A2
)) {
6359 } else if (!(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
) &&
6360 !(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
6361 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
6362 val
|= WDMAC_MODE_RX_ACCEL
;
6366 /* Enable host coalescing bug fix */
6367 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
) ||
6368 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
))
6371 tw32_f(WDMAC_MODE
, val
);
6374 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0) {
6375 val
= tr32(TG3PCI_X_CAPS
);
6376 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
) {
6377 val
&= ~PCIX_CAPS_BURST_MASK
;
6378 val
|= (PCIX_CAPS_MAX_BURST_CPIOB
<< PCIX_CAPS_BURST_SHIFT
);
6379 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
6380 val
&= ~(PCIX_CAPS_SPLIT_MASK
| PCIX_CAPS_BURST_MASK
);
6381 val
|= (PCIX_CAPS_MAX_BURST_CPIOB
<< PCIX_CAPS_BURST_SHIFT
);
6382 if (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
)
6383 val
|= (tp
->split_mode_max_reqs
<<
6384 PCIX_CAPS_SPLIT_SHIFT
);
6386 tw32(TG3PCI_X_CAPS
, val
);
6389 tw32_f(RDMAC_MODE
, rdmac_mode
);
6392 tw32(RCVDCC_MODE
, RCVDCC_MODE_ENABLE
| RCVDCC_MODE_ATTN_ENABLE
);
6393 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
6394 tw32(MBFREE_MODE
, MBFREE_MODE_ENABLE
);
6395 tw32(SNDDATAC_MODE
, SNDDATAC_MODE_ENABLE
);
6396 tw32(SNDBDC_MODE
, SNDBDC_MODE_ENABLE
| SNDBDC_MODE_ATTN_ENABLE
);
6397 tw32(RCVBDI_MODE
, RCVBDI_MODE_ENABLE
| RCVBDI_MODE_RCB_ATTN_ENAB
);
6398 tw32(RCVDBDI_MODE
, RCVDBDI_MODE_ENABLE
| RCVDBDI_MODE_INV_RING_SZ
);
6399 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
);
6400 #if TG3_TSO_SUPPORT != 0
6401 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
)
6402 tw32(SNDDATAI_MODE
, SNDDATAI_MODE_ENABLE
| 0x8);
6404 tw32(SNDBDI_MODE
, SNDBDI_MODE_ENABLE
| SNDBDI_MODE_ATTN_ENABLE
);
6405 tw32(SNDBDS_MODE
, SNDBDS_MODE_ENABLE
| SNDBDS_MODE_ATTN_ENABLE
);
6407 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
) {
6408 err
= tg3_load_5701_a0_firmware_fix(tp
);
6413 #if TG3_TSO_SUPPORT != 0
6414 if (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) {
6415 err
= tg3_load_tso_firmware(tp
);
6421 tp
->tx_mode
= TX_MODE_ENABLE
;
6422 tw32_f(MAC_TX_MODE
, tp
->tx_mode
);
6425 tp
->rx_mode
= RX_MODE_ENABLE
;
6426 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
6427 tp
->rx_mode
|= RX_MODE_IPV6_CSUM_ENABLE
;
6429 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6432 if (tp
->link_config
.phy_is_low_power
) {
6433 tp
->link_config
.phy_is_low_power
= 0;
6434 tp
->link_config
.speed
= tp
->link_config
.orig_speed
;
6435 tp
->link_config
.duplex
= tp
->link_config
.orig_duplex
;
6436 tp
->link_config
.autoneg
= tp
->link_config
.orig_autoneg
;
6439 tp
->mi_mode
= MAC_MI_MODE_BASE
;
6440 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
6443 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
6445 tw32(MAC_MI_STAT
, MAC_MI_STAT_LNKSTAT_ATTN_ENAB
);
6446 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
6447 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
6450 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
6453 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
6454 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) &&
6455 !(tp
->tg3_flags2
& TG3_FLG2_SERDES_PREEMPHASIS
)) {
6456 /* Set drive transmission level to 1.2V */
6457 /* only if the signal pre-emphasis bit is not set */
6458 val
= tr32(MAC_SERDES_CFG
);
6461 tw32(MAC_SERDES_CFG
, val
);
6463 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
)
6464 tw32(MAC_SERDES_CFG
, 0x616000);
6467 /* Prevent chip from dropping frames when flow control
6470 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME
, 2);
6472 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
6473 (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
6474 /* Use hardware link auto-negotiation */
6475 tp
->tg3_flags2
|= TG3_FLG2_HW_AUTONEG
;
6478 if ((tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) &&
6479 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
)) {
6482 tmp
= tr32(SERDES_RX_CTRL
);
6483 tw32(SERDES_RX_CTRL
, tmp
| SERDES_RX_SIG_DETECT
);
6484 tp
->grc_local_ctrl
&= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT
;
6485 tp
->grc_local_ctrl
|= GRC_LCLCTRL_USE_SIG_DETECT
;
6486 tw32(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
6489 err
= tg3_setup_phy(tp
, reset_phy
);
6493 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
6496 /* Clear CRC stats. */
6497 if (!tg3_readphy(tp
, 0x1e, &tmp
)) {
6498 tg3_writephy(tp
, 0x1e, tmp
| 0x8000);
6499 tg3_readphy(tp
, 0x14, &tmp
);
6503 __tg3_set_rx_mode(tp
->dev
);
6505 /* Initialize receive rules. */
6506 tw32(MAC_RCV_RULE_0
, 0xc2000000 & RCV_RULE_DISABLE_MASK
);
6507 tw32(MAC_RCV_VALUE_0
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
6508 tw32(MAC_RCV_RULE_1
, 0x86000004 & RCV_RULE_DISABLE_MASK
);
6509 tw32(MAC_RCV_VALUE_1
, 0xffffffff & RCV_RULE_DISABLE_MASK
);
6511 if ((tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) &&
6512 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
6516 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)
6520 tw32(MAC_RCV_RULE_15
, 0); tw32(MAC_RCV_VALUE_15
, 0);
6522 tw32(MAC_RCV_RULE_14
, 0); tw32(MAC_RCV_VALUE_14
, 0);
6524 tw32(MAC_RCV_RULE_13
, 0); tw32(MAC_RCV_VALUE_13
, 0);
6526 tw32(MAC_RCV_RULE_12
, 0); tw32(MAC_RCV_VALUE_12
, 0);
6528 tw32(MAC_RCV_RULE_11
, 0); tw32(MAC_RCV_VALUE_11
, 0);
6530 tw32(MAC_RCV_RULE_10
, 0); tw32(MAC_RCV_VALUE_10
, 0);
6532 tw32(MAC_RCV_RULE_9
, 0); tw32(MAC_RCV_VALUE_9
, 0);
6534 tw32(MAC_RCV_RULE_8
, 0); tw32(MAC_RCV_VALUE_8
, 0);
6536 tw32(MAC_RCV_RULE_7
, 0); tw32(MAC_RCV_VALUE_7
, 0);
6538 tw32(MAC_RCV_RULE_6
, 0); tw32(MAC_RCV_VALUE_6
, 0);
6540 tw32(MAC_RCV_RULE_5
, 0); tw32(MAC_RCV_VALUE_5
, 0);
6542 tw32(MAC_RCV_RULE_4
, 0); tw32(MAC_RCV_VALUE_4
, 0);
6544 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
6546 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
6554 tg3_write_sig_post_reset(tp
, RESET_KIND_INIT
);
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * NOTE(review): this chunk was recovered from a lossy extraction; the
 * error-check/return lines were reconstructed — verify against upstream
 * tg3.c v3.65.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
        int err;

        /* Force the chip into D0. */
        err = tg3_set_power_state(tp, PCI_D0);
        if (err)
                goto out;

        /* Hardware clocks must be running before touching chip registers. */
        tg3_switch_clocks(tp);

        /* Reset the on-chip memory window before the main HW reset. */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        err = tg3_reset_hw(tp, reset_phy);

out:
        return err;
}
/* Accumulate a 32-bit hardware counter register into a 64-bit
 * (high/low pair) software statistic.  The unsigned wrap-around test
 * ((PSTAT)->low < __val after the add) detects carry into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
/* Periodically (from tg3_timer, once a second on 5705+ parts) fold the
 * chip's clear-on-read 32-bit MAC/RCVLPC counters into the 64-bit
 * software statistics block.  Skipped while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        /* Transmit MAC counters. */
        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        /* Receive MAC counters. */
        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        /* Receive list placement counters. */
        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
/* Driver watchdog timer, re-armed at tp->timer_offset intervals.
 * Responsibilities: keep the non-tagged-status IRQ protocol alive,
 * detect a wedged write-DMA engine (schedules reset_task), poll link
 * state once per second, and send the ASF heartbeat every 2 seconds.
 *
 * NOTE(review): lossy extraction — the local declarations and the
 * phy_event/need_setup branch bookkeeping were reconstructed; verify
 * against upstream tg3.c v3.65.
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        spin_lock(&tp->lock);

        if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->hw_status->status & SD_STATUS_UPDATED) {
                        /* Force the chip to re-raise the interrupt. */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Force a status block DMA update now. */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
                }

                /* Write DMA engine disabled itself: chip is wedged,
                 * hand off to the reset task outside the lock.
                 */
                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
                        tg3_periodic_fetch_stats(tp);

                if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
                        u32 mac_stat;
                        int phy_event = 0;

                        mac_stat = tr32(MAC_STATUS);

                        if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (! netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Bounce the port mode to resync. */
                                        tw32_f(MAC_MODE,
                                               (tp->mac_mode &
                                                ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
                        tg3_serdes_parallel_detect(tp);

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
                        u32 val;

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        /* 5 seconds timeout */
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
                        val = tr32(GRC_RX_CPU_EVENT);
                        val |= (1 << 14);   /* signal firmware mailbox event */
                        tw32(GRC_RX_CPU_EVENT, val);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

        /* Re-arm ourselves. */
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
/* Install the interrupt handler appropriate for the current mode:
 * MSI (optionally the one-shot variant) or shared INTx (optionally the
 * tagged-status variant).
 *
 * NOTE(review): lossy extraction — the specific handler assignments
 * (tg3_msi / tg3_msi_1shot / tg3_interrupt) were reconstructed; verify
 * against upstream tg3.c v3.65.
 */
static int tg3_request_irq(struct tg3 *tp)
{
        irqreturn_t (*fn)(int, void *, struct pt_regs *);
        unsigned long flags;
        struct net_device *dev = tp->dev;

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                fn = tg3_msi;
                if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
                        fn = tg3_msi_1shot;
                /* MSI is exclusive to this device: no IRQF_SHARED. */
                flags = IRQF_SAMPLE_RANDOM;
        } else {
                fn = tg3_interrupt;
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        fn = tg3_interrupt_tagged;
                flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
        }
        return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
}
/* Verify that the device can actually deliver an interrupt: swap in a
 * test ISR, force a coalescing-now event, and poll the interrupt
 * mailbox for up to ~50ms.  The normal handler is reinstalled before
 * returning.  Returns 0 on success, -EIO if no interrupt arrived,
 * or a request_irq() error.
 *
 * NOTE(review): lossy extraction — poll-loop body and return paths
 * reconstructed; verify against upstream tg3.c v3.65.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err, i;
        u32 int_mbox = 0;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        free_irq(tp->pdev->irq, dev);

        err = request_irq(tp->pdev->irq, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
        if (err)
                return err;

        tp->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force an immediate status-block update / interrupt. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               HOSTCC_MODE_NOW);

        for (i = 0; i < 5; i++) {
                int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
                                        TG3_64BIT_REG_LOW);
                if (int_mbox != 0)
                        break;
                msleep(10);
        }

        tg3_disable_ints(tp);

        free_irq(tp->pdev->irq, dev);

        err = tg3_request_irq(tp);

        if (err)
                return err;

        if (int_mbox != 0)
                return 0;

        return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * NOTE(review): lossy extraction — error-branch lines reconstructed;
 * verify against upstream tg3.c v3.65.
 */
static int tg3_test_msi(struct tg3 *tp)
{
        struct net_device *dev = tp->dev;
        int err;
        u16 pci_cmd;

        if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
               "switching to INTx mode. Please report this failure to "
               "the PCI maintainer and include system chipset information.\n",
               tp->dev->name);

        free_irq(tp->pdev->irq, dev);
        pci_disable_msi(tp->pdev);

        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

        err = tg3_request_irq(tp);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->pdev->irq, dev);

        return err;
}
/* net_device open handler: power the chip to D0, allocate DMA rings
 * and status/stats blocks, optionally enable MSI (with a delivery
 * self-test), bring the hardware up, start the watchdog timer, and
 * enable the TX queue.  All failure paths unwind IRQ/MSI/ring state.
 *
 * NOTE(review): lossy extraction — error returns and unwind braces
 * reconstructed; verify against upstream tg3.c v3.65.
 */
static int tg3_open(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        tg3_full_lock(tp, 0);

        err = tg3_set_power_state(tp, PCI_D0);
        if (err) {
                tg3_full_unlock(tp);
                return err;
        }

        tg3_disable_ints(tp);
        tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

        tg3_full_unlock(tp);

        /* The placement of this call is tied
         * to the setup and use of Host TX descriptors.
         */
        err = tg3_alloc_consistent(tp);
        if (err)
                return err;

        /* MSI is usable only on 5750+ silicon past the A/B steppings,
         * and not on dual-port 5714 sharing one PCI function pair.
         */
        if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
            (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
            (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
            !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
              (tp->pdev_peer == tp->pdev))) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
                        printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
                               "Not using MSI.\n", tp->dev->name);
                } else if (pci_enable_msi(tp->pdev) == 0) {
                        u32 msi_mode;

                        msi_mode = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
                        tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
                }
        }
        err = tg3_request_irq(tp);

        if (err) {
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        tg3_full_lock(tp, 0);

        err = tg3_init_hw(tp, 1);
        if (err) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_free_rings(tp);
        } else {
                /* Tagged status needs only 1Hz polling; the racy
                 * non-tagged protocol is polled at 10Hz.
                 */
                if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
                        tp->timer_offset = HZ;
                else
                        tp->timer_offset = HZ / 10;

                BUG_ON(tp->timer_offset > HZ);
                tp->timer_counter = tp->timer_multiplier =
                        (HZ / tp->timer_offset);
                tp->asf_counter = tp->asf_multiplier =
                        ((HZ / tp->timer_offset) * 2);

                init_timer(&tp->timer);
                tp->timer.expires = jiffies + tp->timer_offset;
                tp->timer.data = (unsigned long) tp;
                tp->timer.function = tg3_timer;
        }

        tg3_full_unlock(tp);

        if (err) {
                free_irq(tp->pdev->irq, dev);
                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        pci_disable_msi(tp->pdev);
                        tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                }
                tg3_free_consistent(tp);
                return err;
        }

        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                err = tg3_test_msi(tp);

                if (err) {
                        tg3_full_lock(tp, 0);

                        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                                pci_disable_msi(tp->pdev);
                                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
                        }
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        tg3_free_rings(tp);
                        tg3_free_consistent(tp);

                        tg3_full_unlock(tp);

                        return err;
                }

                if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                        if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
                                /* Enable one-shot MSI mode in the chip. */
                                u32 val = tr32(0x7c04);

                                tw32(0x7c04, val | (1 << 29));
                        }
                }
        }

        tg3_full_lock(tp, 0);

        add_timer(&tp->timer);
        tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
        tg3_enable_ints(tp);

        tg3_full_unlock(tp);

        netif_start_queue(dev);

        return 0;
}
/* Debug-only register/SRAM dump of essentially every chip block.
 * Not called from normal paths (hence the commented-out "static");
 * intended to be invoked by hand while debugging hangs/resets.
 *
 * NOTE(review): lossy extraction — local declarations and several
 * printk argument lines reconstructed; verify against upstream
 * tg3.c v3.65.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
        u32 val32, val32_2, val32_3, val32_4, val32_5;
        u16 val16;
        int i;

        pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
        pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
        printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
               val16, val32);

        /* MAC block */
        printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
               tr32(MAC_MODE), tr32(MAC_STATUS));
        printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
               tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
        printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
               tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
        printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
               tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

        /* Send data initiator control block */
        printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
               tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
        printk("       SNDDATAI_STATSCTRL[%08x]\n",
               tr32(SNDDATAI_STATSCTRL));

        /* Send data completion control block */
        printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

        /* Send BD ring selector block */
        printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
               tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

        /* Send BD initiator control block */
        printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
               tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

        /* Send BD completion control block */
        printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

        /* Receive list placement control block */
        printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
               tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
        printk("       RCVLPC_STATSCTRL[%08x]\n",
               tr32(RCVLPC_STATSCTRL));

        /* Receive data and receive BD initiator control block */
        printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
               tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

        /* Receive data completion control block */
        printk("DEBUG: RCVDCC_MODE[%08x]\n",
               tr32(RCVDCC_MODE));

        /* Receive BD initiator control block */
        printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
               tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

        /* Receive BD completion control block */
        printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
               tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

        /* Receive list selector control block */
        printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
               tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

        /* Mbuf cluster free block */
        printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
               tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

        /* Host coalescing control block */
        printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
               tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
        printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
               tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
        printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATS_BLK_NIC_ADDR));
        printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
               tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

        /* Memory arbiter control block */
        printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
               tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

        /* Buffer manager control block */
        printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
               tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
        printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
        printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
               "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
               tr32(BUFMGR_DMA_DESC_POOL_ADDR),
               tr32(BUFMGR_DMA_DESC_POOL_SIZE));

        /* Read DMA control block */
        printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
               tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

        /* Write DMA control block */
        printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
               tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

        /* DMA completion block */
        printk("DEBUG: DMAC_MODE[%08x]\n",
               tr32(DMAC_MODE));

        /* GRC block */
        printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
               tr32(GRC_MODE), tr32(GRC_MISC_CFG));
        printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
               tr32(GRC_LOCAL_CTRL));

        /* TG3_BDINFOs */
        printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_JUMBO_BD + 0x0),
               tr32(RCVDBDI_JUMBO_BD + 0x4),
               tr32(RCVDBDI_JUMBO_BD + 0x8),
               tr32(RCVDBDI_JUMBO_BD + 0xc));
        printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_STD_BD + 0x0),
               tr32(RCVDBDI_STD_BD + 0x4),
               tr32(RCVDBDI_STD_BD + 0x8),
               tr32(RCVDBDI_STD_BD + 0xc));
        printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
               tr32(RCVDBDI_MINI_BD + 0x0),
               tr32(RCVDBDI_MINI_BD + 0x4),
               tr32(RCVDBDI_MINI_BD + 0x8),
               tr32(RCVDBDI_MINI_BD + 0xc));

        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
        printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4);

        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
        tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
        printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
               val32, val32_2, val32_3, val32_4, val32_5);

        /* SW status block */
        printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
               tp->hw_status->status,
               tp->hw_status->status_tag,
               tp->hw_status->rx_jumbo_consumer,
               tp->hw_status->rx_consumer,
               tp->hw_status->rx_mini_consumer,
               tp->hw_status->idx[0].rx_producer,
               tp->hw_status->idx[0].tx_consumer);

        /* SW statistics block */
        printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
               ((u32 *)tp->hw_stats)[0],
               ((u32 *)tp->hw_stats)[1],
               ((u32 *)tp->hw_stats)[2],
               ((u32 *)tp->hw_stats)[3]);

        /* Mailboxes */
        printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
               tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

        /* NIC side send descriptors. */
        for (i = 0; i < 6; i++) {
                u8 __iomem *txd;   /* NOTE(review): decl reconstructed */

                txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
                        + (i * sizeof(struct tg3_tx_buffer_desc));
                printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(txd + 0x0), readl(txd + 0x4),
                       readl(txd + 0x8), readl(txd + 0xc));
        }

        /* NIC side RX descriptors. */
        for (i = 0; i < 6; i++) {
                u8 __iomem *rxd;   /* NOTE(review): decl reconstructed */

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }

        for (i = 0; i < 6; i++) {
                u8 __iomem *rxd;   /* NOTE(review): decl reconstructed */

                rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
                        + (i * sizeof(struct tg3_rx_buffer_desc));
                printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
                rxd += (4 * sizeof(u32));
                printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
                       i,
                       readl(rxd + 0x0), readl(rxd + 0x4),
                       readl(rxd + 0x8), readl(rxd + 0xc));
        }
}
7235 static struct net_device_stats
*tg3_get_stats(struct net_device
*);
7236 static struct tg3_ethtool_stats
*tg3_get_estats(struct tg3
*);
/* net_device close handler: wait out any in-flight reset task, stop
 * the TX queue and timer, halt the chip, release IRQ/MSI, snapshot
 * the final statistics, free DMA memory, and power down to D3hot.
 *
 * NOTE(review): lossy extraction — the wait-loop body, ring free and
 * return were reconstructed; verify against upstream tg3.c v3.65.
 */
static int tg3_close(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
                msleep(1);

        netif_stop_queue(dev);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tp->tg3_flags &=
                ~(TG3_FLAG_INIT_COMPLETE |
                  TG3_FLAG_GOT_SERDES_FLOWCTL);

        tg3_full_unlock(tp);

        free_irq(tp->pdev->irq, dev);
        if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
                pci_disable_msi(tp->pdev);
                tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
        }

        /* Preserve cumulative statistics across down/up cycles. */
        memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
               sizeof(tp->net_stats_prev));
        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_free_consistent(tp);

        tg3_set_power_state(tp, PCI_D3hot);

        netif_carrier_off(tp->dev);

        return 0;
}
/* Fold a 64-bit (high/low) hardware statistic into an unsigned long.
 * On 32-bit hosts only the low word survives (high word truncated);
 * on 64-bit hosts both halves are combined.
 */
static inline unsigned long get_stat64(tg3_stat64_t *val)
{
        unsigned long ret;

#if (BITS_PER_LONG == 32)
        ret = val->low;   /* NOTE(review): 32-bit branch reconstructed */
#else
        ret = ((u64)val->high << 32) | ((u64)val->low);
#endif
        return ret;
}
/* Return the cumulative RX CRC error count.  On 5700/5701 copper
 * parts the MAC counter is unreliable, so the count is read from the
 * PHY's CRC-error counter (shadow register 0x14 after unlocking via
 * reg 0x1e bit 15) and accumulated in software; all other chips use
 * the hardware rx_fcs_errors statistic directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, 0x1e, &val)) {
                        tg3_writephy(tp, 0x1e, val | 0x8000);
                        tg3_readphy(tp, 0x14, &val);
                } else
                        val = 0;   /* PHY read failed: add nothing */
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
/* Used inside tg3_get_estats(): new value = snapshot taken at the last
 * close (old_estats) + current hardware counter.  Relies on estats,
 * old_estats and hw_stats locals in the caller's scope.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)
/* Build the ethtool statistics block: each member is the pre-close
 * snapshot plus the live hardware counter (see ESTAT_ADD).
 *
 * NOTE(review): lossy extraction — the hw_stats NULL guard and final
 * return were reconstructed; verify against upstream tg3.c v3.65.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        return estats;
}
/* net_device get_stats handler: combine the snapshot taken at the last
 * close (net_stats_prev) with the live 64-bit hardware counters into
 * the standard net_device_stats layout.
 *
 * NOTE(review): lossy extraction — the hw_stats NULL guard and final
 * return were reconstructed; verify against upstream tg3.c v3.65.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        struct net_device_stats *stats = &tp->net_stats;
        struct net_device_stats *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors need the 5700/5701 PHY workaround, see
         * calc_crc_errors().
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        return stats;
}
7477 static inline u32
calc_crc(unsigned char *buf
, int len
)
7485 for (j
= 0; j
< len
; j
++) {
7488 for (k
= 0; k
< 8; k
++) {
7502 static void tg3_set_multi(struct tg3
*tp
, unsigned int accept_all
)
7504 /* accept or reject all multicast frames */
7505 tw32(MAC_HASH_REG_0
, accept_all
? 0xffffffff : 0);
7506 tw32(MAC_HASH_REG_1
, accept_all
? 0xffffffff : 0);
7507 tw32(MAC_HASH_REG_2
, accept_all
? 0xffffffff : 0);
7508 tw32(MAC_HASH_REG_3
, accept_all
? 0xffffffff : 0);
7511 static void __tg3_set_rx_mode(struct net_device
*dev
)
7513 struct tg3
*tp
= netdev_priv(dev
);
7516 rx_mode
= tp
->rx_mode
& ~(RX_MODE_PROMISC
|
7517 RX_MODE_KEEP_VLAN_TAG
);
7519 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
7522 #if TG3_VLAN_TAG_USED
7524 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
7525 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
7527 /* By definition, VLAN is disabled always in this
7530 if (!(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
))
7531 rx_mode
|= RX_MODE_KEEP_VLAN_TAG
;
7534 if (dev
->flags
& IFF_PROMISC
) {
7535 /* Promiscuous mode. */
7536 rx_mode
|= RX_MODE_PROMISC
;
7537 } else if (dev
->flags
& IFF_ALLMULTI
) {
7538 /* Accept all multicast. */
7539 tg3_set_multi (tp
, 1);
7540 } else if (dev
->mc_count
< 1) {
7541 /* Reject all multicast. */
7542 tg3_set_multi (tp
, 0);
7544 /* Accept one or more multicast(s). */
7545 struct dev_mc_list
*mclist
;
7547 u32 mc_filter
[4] = { 0, };
7552 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
7553 i
++, mclist
= mclist
->next
) {
7555 crc
= calc_crc (mclist
->dmi_addr
, ETH_ALEN
);
7557 regidx
= (bit
& 0x60) >> 5;
7559 mc_filter
[regidx
] |= (1 << bit
);
7562 tw32(MAC_HASH_REG_0
, mc_filter
[0]);
7563 tw32(MAC_HASH_REG_1
, mc_filter
[1]);
7564 tw32(MAC_HASH_REG_2
, mc_filter
[2]);
7565 tw32(MAC_HASH_REG_3
, mc_filter
[3]);
7568 if (rx_mode
!= tp
->rx_mode
) {
7569 tp
->rx_mode
= rx_mode
;
7570 tw32_f(MAC_RX_MODE
, rx_mode
);
7575 static void tg3_set_rx_mode(struct net_device
*dev
)
7577 struct tg3
*tp
= netdev_priv(dev
);
7579 if (!netif_running(dev
))
7582 tg3_full_lock(tp
, 0);
7583 __tg3_set_rx_mode(dev
);
7584 tg3_full_unlock(tp
);
7587 #define TG3_REGDUMP_LEN (32 * 1024)
7589 static int tg3_get_regs_len(struct net_device
*dev
)
7591 return TG3_REGDUMP_LEN
;
7594 static void tg3_get_regs(struct net_device
*dev
,
7595 struct ethtool_regs
*regs
, void *_p
)
7598 struct tg3
*tp
= netdev_priv(dev
);
7604 memset(p
, 0, TG3_REGDUMP_LEN
);
7606 if (tp
->link_config
.phy_is_low_power
)
7609 tg3_full_lock(tp
, 0);
7611 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
7612 #define GET_REG32_LOOP(base,len) \
7613 do { p = (u32 *)(orig_p + (base)); \
7614 for (i = 0; i < len; i += 4) \
7615 __GET_REG32((base) + i); \
7617 #define GET_REG32_1(reg) \
7618 do { p = (u32 *)(orig_p + (reg)); \
7619 __GET_REG32((reg)); \
7622 GET_REG32_LOOP(TG3PCI_VENDOR
, 0xb0);
7623 GET_REG32_LOOP(MAILBOX_INTERRUPT_0
, 0x200);
7624 GET_REG32_LOOP(MAC_MODE
, 0x4f0);
7625 GET_REG32_LOOP(SNDDATAI_MODE
, 0xe0);
7626 GET_REG32_1(SNDDATAC_MODE
);
7627 GET_REG32_LOOP(SNDBDS_MODE
, 0x80);
7628 GET_REG32_LOOP(SNDBDI_MODE
, 0x48);
7629 GET_REG32_1(SNDBDC_MODE
);
7630 GET_REG32_LOOP(RCVLPC_MODE
, 0x20);
7631 GET_REG32_LOOP(RCVLPC_SELLST_BASE
, 0x15c);
7632 GET_REG32_LOOP(RCVDBDI_MODE
, 0x0c);
7633 GET_REG32_LOOP(RCVDBDI_JUMBO_BD
, 0x3c);
7634 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0
, 0x44);
7635 GET_REG32_1(RCVDCC_MODE
);
7636 GET_REG32_LOOP(RCVBDI_MODE
, 0x20);
7637 GET_REG32_LOOP(RCVCC_MODE
, 0x14);
7638 GET_REG32_LOOP(RCVLSC_MODE
, 0x08);
7639 GET_REG32_1(MBFREE_MODE
);
7640 GET_REG32_LOOP(HOSTCC_MODE
, 0x100);
7641 GET_REG32_LOOP(MEMARB_MODE
, 0x10);
7642 GET_REG32_LOOP(BUFMGR_MODE
, 0x58);
7643 GET_REG32_LOOP(RDMAC_MODE
, 0x08);
7644 GET_REG32_LOOP(WDMAC_MODE
, 0x08);
7645 GET_REG32_1(RX_CPU_MODE
);
7646 GET_REG32_1(RX_CPU_STATE
);
7647 GET_REG32_1(RX_CPU_PGMCTR
);
7648 GET_REG32_1(RX_CPU_HWBKPT
);
7649 GET_REG32_1(TX_CPU_MODE
);
7650 GET_REG32_1(TX_CPU_STATE
);
7651 GET_REG32_1(TX_CPU_PGMCTR
);
7652 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0
, 0x110);
7653 GET_REG32_LOOP(FTQ_RESET
, 0x120);
7654 GET_REG32_LOOP(MSGINT_MODE
, 0x0c);
7655 GET_REG32_1(DMAC_MODE
);
7656 GET_REG32_LOOP(GRC_MODE
, 0x4c);
7657 if (tp
->tg3_flags
& TG3_FLAG_NVRAM
)
7658 GET_REG32_LOOP(NVRAM_CMD
, 0x24);
7661 #undef GET_REG32_LOOP
7664 tg3_full_unlock(tp
);
7667 static int tg3_get_eeprom_len(struct net_device
*dev
)
7669 struct tg3
*tp
= netdev_priv(dev
);
7671 return tp
->nvram_size
;
7674 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
);
7675 static int tg3_nvram_read_swab(struct tg3
*tp
, u32 offset
, u32
*val
);
7677 static int tg3_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
7679 struct tg3
*tp
= netdev_priv(dev
);
7682 u32 i
, offset
, len
, val
, b_offset
, b_count
;
7684 if (tp
->link_config
.phy_is_low_power
)
7687 offset
= eeprom
->offset
;
7691 eeprom
->magic
= TG3_EEPROM_MAGIC
;
7694 /* adjustments to start on required 4 byte boundary */
7695 b_offset
= offset
& 3;
7696 b_count
= 4 - b_offset
;
7697 if (b_count
> len
) {
7698 /* i.e. offset=1 len=2 */
7701 ret
= tg3_nvram_read(tp
, offset
-b_offset
, &val
);
7704 val
= cpu_to_le32(val
);
7705 memcpy(data
, ((char*)&val
) + b_offset
, b_count
);
7708 eeprom
->len
+= b_count
;
7711 /* read bytes upto the last 4 byte boundary */
7712 pd
= &data
[eeprom
->len
];
7713 for (i
= 0; i
< (len
- (len
& 3)); i
+= 4) {
7714 ret
= tg3_nvram_read(tp
, offset
+ i
, &val
);
7719 val
= cpu_to_le32(val
);
7720 memcpy(pd
+ i
, &val
, 4);
7725 /* read last bytes not ending on 4 byte boundary */
7726 pd
= &data
[eeprom
->len
];
7728 b_offset
= offset
+ len
- b_count
;
7729 ret
= tg3_nvram_read(tp
, b_offset
, &val
);
7732 val
= cpu_to_le32(val
);
7733 memcpy(pd
, ((char*)&val
), b_count
);
7734 eeprom
->len
+= b_count
;
7739 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
);
7741 static int tg3_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
, u8
*data
)
7743 struct tg3
*tp
= netdev_priv(dev
);
7745 u32 offset
, len
, b_offset
, odd_len
, start
, end
;
7748 if (tp
->link_config
.phy_is_low_power
)
7751 if (eeprom
->magic
!= TG3_EEPROM_MAGIC
)
7754 offset
= eeprom
->offset
;
7757 if ((b_offset
= (offset
& 3))) {
7758 /* adjustments to start on required 4 byte boundary */
7759 ret
= tg3_nvram_read(tp
, offset
-b_offset
, &start
);
7762 start
= cpu_to_le32(start
);
7771 /* adjustments to end on required 4 byte boundary */
7773 len
= (len
+ 3) & ~3;
7774 ret
= tg3_nvram_read(tp
, offset
+len
-4, &end
);
7777 end
= cpu_to_le32(end
);
7781 if (b_offset
|| odd_len
) {
7782 buf
= kmalloc(len
, GFP_KERNEL
);
7786 memcpy(buf
, &start
, 4);
7788 memcpy(buf
+len
-4, &end
, 4);
7789 memcpy(buf
+ b_offset
, data
, eeprom
->len
);
7792 ret
= tg3_nvram_write_block(tp
, offset
, len
, buf
);
7800 static int tg3_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7802 struct tg3
*tp
= netdev_priv(dev
);
7804 cmd
->supported
= (SUPPORTED_Autoneg
);
7806 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
7807 cmd
->supported
|= (SUPPORTED_1000baseT_Half
|
7808 SUPPORTED_1000baseT_Full
);
7810 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)) {
7811 cmd
->supported
|= (SUPPORTED_100baseT_Half
|
7812 SUPPORTED_100baseT_Full
|
7813 SUPPORTED_10baseT_Half
|
7814 SUPPORTED_10baseT_Full
|
7816 cmd
->port
= PORT_TP
;
7818 cmd
->supported
|= SUPPORTED_FIBRE
;
7819 cmd
->port
= PORT_FIBRE
;
7822 cmd
->advertising
= tp
->link_config
.advertising
;
7823 if (netif_running(dev
)) {
7824 cmd
->speed
= tp
->link_config
.active_speed
;
7825 cmd
->duplex
= tp
->link_config
.active_duplex
;
7827 cmd
->phy_address
= PHY_ADDR
;
7828 cmd
->transceiver
= 0;
7829 cmd
->autoneg
= tp
->link_config
.autoneg
;
7835 static int tg3_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7837 struct tg3
*tp
= netdev_priv(dev
);
7839 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) {
7840 /* These are the only valid advertisement bits allowed. */
7841 if (cmd
->autoneg
== AUTONEG_ENABLE
&&
7842 (cmd
->advertising
& ~(ADVERTISED_1000baseT_Half
|
7843 ADVERTISED_1000baseT_Full
|
7844 ADVERTISED_Autoneg
|
7847 /* Fiber can only do SPEED_1000. */
7848 else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
7849 (cmd
->speed
!= SPEED_1000
))
7851 /* Copper cannot force SPEED_1000. */
7852 } else if ((cmd
->autoneg
!= AUTONEG_ENABLE
) &&
7853 (cmd
->speed
== SPEED_1000
))
7855 else if ((cmd
->speed
== SPEED_1000
) &&
7856 (tp
->tg3_flags2
& TG3_FLAG_10_100_ONLY
))
7859 tg3_full_lock(tp
, 0);
7861 tp
->link_config
.autoneg
= cmd
->autoneg
;
7862 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
7863 tp
->link_config
.advertising
= cmd
->advertising
;
7864 tp
->link_config
.speed
= SPEED_INVALID
;
7865 tp
->link_config
.duplex
= DUPLEX_INVALID
;
7867 tp
->link_config
.advertising
= 0;
7868 tp
->link_config
.speed
= cmd
->speed
;
7869 tp
->link_config
.duplex
= cmd
->duplex
;
7872 if (netif_running(dev
))
7873 tg3_setup_phy(tp
, 1);
7875 tg3_full_unlock(tp
);
7880 static void tg3_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
7882 struct tg3
*tp
= netdev_priv(dev
);
7884 strcpy(info
->driver
, DRV_MODULE_NAME
);
7885 strcpy(info
->version
, DRV_MODULE_VERSION
);
7886 strcpy(info
->fw_version
, tp
->fw_ver
);
7887 strcpy(info
->bus_info
, pci_name(tp
->pdev
));
7890 static void tg3_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7892 struct tg3
*tp
= netdev_priv(dev
);
7894 wol
->supported
= WAKE_MAGIC
;
7896 if (tp
->tg3_flags
& TG3_FLAG_WOL_ENABLE
)
7897 wol
->wolopts
= WAKE_MAGIC
;
7898 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
7901 static int tg3_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7903 struct tg3
*tp
= netdev_priv(dev
);
7905 if (wol
->wolopts
& ~WAKE_MAGIC
)
7907 if ((wol
->wolopts
& WAKE_MAGIC
) &&
7908 tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
&&
7909 !(tp
->tg3_flags
& TG3_FLAG_SERDES_WOL_CAP
))
7912 spin_lock_bh(&tp
->lock
);
7913 if (wol
->wolopts
& WAKE_MAGIC
)
7914 tp
->tg3_flags
|= TG3_FLAG_WOL_ENABLE
;
7916 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
7917 spin_unlock_bh(&tp
->lock
);
7922 static u32
tg3_get_msglevel(struct net_device
*dev
)
7924 struct tg3
*tp
= netdev_priv(dev
);
7925 return tp
->msg_enable
;
7928 static void tg3_set_msglevel(struct net_device
*dev
, u32 value
)
7930 struct tg3
*tp
= netdev_priv(dev
);
7931 tp
->msg_enable
= value
;
7934 #if TG3_TSO_SUPPORT != 0
7935 static int tg3_set_tso(struct net_device
*dev
, u32 value
)
7937 struct tg3
*tp
= netdev_priv(dev
);
7939 if (!(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
)) {
7944 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO_2
) {
7946 dev
->features
|= NETIF_F_TSO6
;
7948 dev
->features
&= ~NETIF_F_TSO6
;
7950 return ethtool_op_set_tso(dev
, value
);
7954 static int tg3_nway_reset(struct net_device
*dev
)
7956 struct tg3
*tp
= netdev_priv(dev
);
7960 if (!netif_running(dev
))
7963 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
7966 spin_lock_bh(&tp
->lock
);
7968 tg3_readphy(tp
, MII_BMCR
, &bmcr
);
7969 if (!tg3_readphy(tp
, MII_BMCR
, &bmcr
) &&
7970 ((bmcr
& BMCR_ANENABLE
) ||
7971 (tp
->tg3_flags2
& TG3_FLG2_PARALLEL_DETECT
))) {
7972 tg3_writephy(tp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
7976 spin_unlock_bh(&tp
->lock
);
7981 static void tg3_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
7983 struct tg3
*tp
= netdev_priv(dev
);
7985 ering
->rx_max_pending
= TG3_RX_RING_SIZE
- 1;
7986 ering
->rx_mini_max_pending
= 0;
7987 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
7988 ering
->rx_jumbo_max_pending
= TG3_RX_JUMBO_RING_SIZE
- 1;
7990 ering
->rx_jumbo_max_pending
= 0;
7992 ering
->tx_max_pending
= TG3_TX_RING_SIZE
- 1;
7994 ering
->rx_pending
= tp
->rx_pending
;
7995 ering
->rx_mini_pending
= 0;
7996 if (tp
->tg3_flags
& TG3_FLAG_JUMBO_RING_ENABLE
)
7997 ering
->rx_jumbo_pending
= tp
->rx_jumbo_pending
;
7999 ering
->rx_jumbo_pending
= 0;
8001 ering
->tx_pending
= tp
->tx_pending
;
8004 static int tg3_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
8006 struct tg3
*tp
= netdev_priv(dev
);
8007 int irq_sync
= 0, err
= 0;
8009 if ((ering
->rx_pending
> TG3_RX_RING_SIZE
- 1) ||
8010 (ering
->rx_jumbo_pending
> TG3_RX_JUMBO_RING_SIZE
- 1) ||
8011 (ering
->tx_pending
> TG3_TX_RING_SIZE
- 1))
8014 if (netif_running(dev
)) {
8019 tg3_full_lock(tp
, irq_sync
);
8021 tp
->rx_pending
= ering
->rx_pending
;
8023 if ((tp
->tg3_flags2
& TG3_FLG2_MAX_RXPEND_64
) &&
8024 tp
->rx_pending
> 63)
8025 tp
->rx_pending
= 63;
8026 tp
->rx_jumbo_pending
= ering
->rx_jumbo_pending
;
8027 tp
->tx_pending
= ering
->tx_pending
;
8029 if (netif_running(dev
)) {
8030 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8031 err
= tg3_restart_hw(tp
, 1);
8033 tg3_netif_start(tp
);
8036 tg3_full_unlock(tp
);
8041 static void tg3_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
8043 struct tg3
*tp
= netdev_priv(dev
);
8045 epause
->autoneg
= (tp
->tg3_flags
& TG3_FLAG_PAUSE_AUTONEG
) != 0;
8046 epause
->rx_pause
= (tp
->tg3_flags
& TG3_FLAG_RX_PAUSE
) != 0;
8047 epause
->tx_pause
= (tp
->tg3_flags
& TG3_FLAG_TX_PAUSE
) != 0;
8050 static int tg3_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
8052 struct tg3
*tp
= netdev_priv(dev
);
8053 int irq_sync
= 0, err
= 0;
8055 if (netif_running(dev
)) {
8060 tg3_full_lock(tp
, irq_sync
);
8062 if (epause
->autoneg
)
8063 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
8065 tp
->tg3_flags
&= ~TG3_FLAG_PAUSE_AUTONEG
;
8066 if (epause
->rx_pause
)
8067 tp
->tg3_flags
|= TG3_FLAG_RX_PAUSE
;
8069 tp
->tg3_flags
&= ~TG3_FLAG_RX_PAUSE
;
8070 if (epause
->tx_pause
)
8071 tp
->tg3_flags
|= TG3_FLAG_TX_PAUSE
;
8073 tp
->tg3_flags
&= ~TG3_FLAG_TX_PAUSE
;
8075 if (netif_running(dev
)) {
8076 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8077 err
= tg3_restart_hw(tp
, 1);
8079 tg3_netif_start(tp
);
8082 tg3_full_unlock(tp
);
8087 static u32
tg3_get_rx_csum(struct net_device
*dev
)
8089 struct tg3
*tp
= netdev_priv(dev
);
8090 return (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0;
8093 static int tg3_set_rx_csum(struct net_device
*dev
, u32 data
)
8095 struct tg3
*tp
= netdev_priv(dev
);
8097 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
8103 spin_lock_bh(&tp
->lock
);
8105 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
8107 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
8108 spin_unlock_bh(&tp
->lock
);
8113 static int tg3_set_tx_csum(struct net_device
*dev
, u32 data
)
8115 struct tg3
*tp
= netdev_priv(dev
);
8117 if (tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) {
8123 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
8124 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
8125 ethtool_op_set_tx_hw_csum(dev
, data
);
8127 ethtool_op_set_tx_csum(dev
, data
);
8132 static int tg3_get_stats_count (struct net_device
*dev
)
8134 return TG3_NUM_STATS
;
8137 static int tg3_get_test_count (struct net_device
*dev
)
8139 return TG3_NUM_TEST
;
8142 static void tg3_get_strings (struct net_device
*dev
, u32 stringset
, u8
*buf
)
8144 switch (stringset
) {
8146 memcpy(buf
, ðtool_stats_keys
, sizeof(ethtool_stats_keys
));
8149 memcpy(buf
, ðtool_test_keys
, sizeof(ethtool_test_keys
));
8152 WARN_ON(1); /* we need a WARN() */
8157 static int tg3_phys_id(struct net_device
*dev
, u32 data
)
8159 struct tg3
*tp
= netdev_priv(dev
);
8162 if (!netif_running(tp
->dev
))
8168 for (i
= 0; i
< (data
* 2); i
++) {
8170 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
8171 LED_CTRL_1000MBPS_ON
|
8172 LED_CTRL_100MBPS_ON
|
8173 LED_CTRL_10MBPS_ON
|
8174 LED_CTRL_TRAFFIC_OVERRIDE
|
8175 LED_CTRL_TRAFFIC_BLINK
|
8176 LED_CTRL_TRAFFIC_LED
);
8179 tw32(MAC_LED_CTRL
, LED_CTRL_LNKLED_OVERRIDE
|
8180 LED_CTRL_TRAFFIC_OVERRIDE
);
8182 if (msleep_interruptible(500))
8185 tw32(MAC_LED_CTRL
, tp
->led_ctrl
);
8189 static void tg3_get_ethtool_stats (struct net_device
*dev
,
8190 struct ethtool_stats
*estats
, u64
*tmp_stats
)
8192 struct tg3
*tp
= netdev_priv(dev
);
8193 memcpy(tmp_stats
, tg3_get_estats(tp
), sizeof(tp
->estats
));
8196 #define NVRAM_TEST_SIZE 0x100
8197 #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8199 static int tg3_test_nvram(struct tg3
*tp
)
8201 u32
*buf
, csum
, magic
;
8202 int i
, j
, err
= 0, size
;
8204 if (tg3_nvram_read_swab(tp
, 0, &magic
) != 0)
8207 if (magic
== TG3_EEPROM_MAGIC
)
8208 size
= NVRAM_TEST_SIZE
;
8209 else if ((magic
& 0xff000000) == 0xa5000000) {
8210 if ((magic
& 0xe00000) == 0x200000)
8211 size
= NVRAM_SELFBOOT_FORMAT1_SIZE
;
8217 buf
= kmalloc(size
, GFP_KERNEL
);
8222 for (i
= 0, j
= 0; i
< size
; i
+= 4, j
++) {
8225 if ((err
= tg3_nvram_read(tp
, i
, &val
)) != 0)
8227 buf
[j
] = cpu_to_le32(val
);
8232 /* Selfboot format */
8233 if (cpu_to_be32(buf
[0]) != TG3_EEPROM_MAGIC
) {
8234 u8
*buf8
= (u8
*) buf
, csum8
= 0;
8236 for (i
= 0; i
< size
; i
++)
8248 /* Bootstrap checksum at offset 0x10 */
8249 csum
= calc_crc((unsigned char *) buf
, 0x10);
8250 if(csum
!= cpu_to_le32(buf
[0x10/4]))
8253 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8254 csum
= calc_crc((unsigned char *) &buf
[0x74/4], 0x88);
8255 if (csum
!= cpu_to_le32(buf
[0xfc/4]))
8265 #define TG3_SERDES_TIMEOUT_SEC 2
8266 #define TG3_COPPER_TIMEOUT_SEC 6
8268 static int tg3_test_link(struct tg3
*tp
)
8272 if (!netif_running(tp
->dev
))
8275 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
8276 max
= TG3_SERDES_TIMEOUT_SEC
;
8278 max
= TG3_COPPER_TIMEOUT_SEC
;
8280 for (i
= 0; i
< max
; i
++) {
8281 if (netif_carrier_ok(tp
->dev
))
8284 if (msleep_interruptible(1000))
8291 /* Only test the commonly used registers */
8292 static int tg3_test_registers(struct tg3
*tp
)
8295 u32 offset
, read_mask
, write_mask
, val
, save_val
, read_val
;
8299 #define TG3_FL_5705 0x1
8300 #define TG3_FL_NOT_5705 0x2
8301 #define TG3_FL_NOT_5788 0x4
8305 /* MAC Control Registers */
8306 { MAC_MODE
, TG3_FL_NOT_5705
,
8307 0x00000000, 0x00ef6f8c },
8308 { MAC_MODE
, TG3_FL_5705
,
8309 0x00000000, 0x01ef6b8c },
8310 { MAC_STATUS
, TG3_FL_NOT_5705
,
8311 0x03800107, 0x00000000 },
8312 { MAC_STATUS
, TG3_FL_5705
,
8313 0x03800100, 0x00000000 },
8314 { MAC_ADDR_0_HIGH
, 0x0000,
8315 0x00000000, 0x0000ffff },
8316 { MAC_ADDR_0_LOW
, 0x0000,
8317 0x00000000, 0xffffffff },
8318 { MAC_RX_MTU_SIZE
, 0x0000,
8319 0x00000000, 0x0000ffff },
8320 { MAC_TX_MODE
, 0x0000,
8321 0x00000000, 0x00000070 },
8322 { MAC_TX_LENGTHS
, 0x0000,
8323 0x00000000, 0x00003fff },
8324 { MAC_RX_MODE
, TG3_FL_NOT_5705
,
8325 0x00000000, 0x000007fc },
8326 { MAC_RX_MODE
, TG3_FL_5705
,
8327 0x00000000, 0x000007dc },
8328 { MAC_HASH_REG_0
, 0x0000,
8329 0x00000000, 0xffffffff },
8330 { MAC_HASH_REG_1
, 0x0000,
8331 0x00000000, 0xffffffff },
8332 { MAC_HASH_REG_2
, 0x0000,
8333 0x00000000, 0xffffffff },
8334 { MAC_HASH_REG_3
, 0x0000,
8335 0x00000000, 0xffffffff },
8337 /* Receive Data and Receive BD Initiator Control Registers. */
8338 { RCVDBDI_JUMBO_BD
+0, TG3_FL_NOT_5705
,
8339 0x00000000, 0xffffffff },
8340 { RCVDBDI_JUMBO_BD
+4, TG3_FL_NOT_5705
,
8341 0x00000000, 0xffffffff },
8342 { RCVDBDI_JUMBO_BD
+8, TG3_FL_NOT_5705
,
8343 0x00000000, 0x00000003 },
8344 { RCVDBDI_JUMBO_BD
+0xc, TG3_FL_NOT_5705
,
8345 0x00000000, 0xffffffff },
8346 { RCVDBDI_STD_BD
+0, 0x0000,
8347 0x00000000, 0xffffffff },
8348 { RCVDBDI_STD_BD
+4, 0x0000,
8349 0x00000000, 0xffffffff },
8350 { RCVDBDI_STD_BD
+8, 0x0000,
8351 0x00000000, 0xffff0002 },
8352 { RCVDBDI_STD_BD
+0xc, 0x0000,
8353 0x00000000, 0xffffffff },
8355 /* Receive BD Initiator Control Registers. */
8356 { RCVBDI_STD_THRESH
, TG3_FL_NOT_5705
,
8357 0x00000000, 0xffffffff },
8358 { RCVBDI_STD_THRESH
, TG3_FL_5705
,
8359 0x00000000, 0x000003ff },
8360 { RCVBDI_JUMBO_THRESH
, TG3_FL_NOT_5705
,
8361 0x00000000, 0xffffffff },
8363 /* Host Coalescing Control Registers. */
8364 { HOSTCC_MODE
, TG3_FL_NOT_5705
,
8365 0x00000000, 0x00000004 },
8366 { HOSTCC_MODE
, TG3_FL_5705
,
8367 0x00000000, 0x000000f6 },
8368 { HOSTCC_RXCOL_TICKS
, TG3_FL_NOT_5705
,
8369 0x00000000, 0xffffffff },
8370 { HOSTCC_RXCOL_TICKS
, TG3_FL_5705
,
8371 0x00000000, 0x000003ff },
8372 { HOSTCC_TXCOL_TICKS
, TG3_FL_NOT_5705
,
8373 0x00000000, 0xffffffff },
8374 { HOSTCC_TXCOL_TICKS
, TG3_FL_5705
,
8375 0x00000000, 0x000003ff },
8376 { HOSTCC_RXMAX_FRAMES
, TG3_FL_NOT_5705
,
8377 0x00000000, 0xffffffff },
8378 { HOSTCC_RXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
8379 0x00000000, 0x000000ff },
8380 { HOSTCC_TXMAX_FRAMES
, TG3_FL_NOT_5705
,
8381 0x00000000, 0xffffffff },
8382 { HOSTCC_TXMAX_FRAMES
, TG3_FL_5705
| TG3_FL_NOT_5788
,
8383 0x00000000, 0x000000ff },
8384 { HOSTCC_RXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
8385 0x00000000, 0xffffffff },
8386 { HOSTCC_TXCOAL_TICK_INT
, TG3_FL_NOT_5705
,
8387 0x00000000, 0xffffffff },
8388 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
8389 0x00000000, 0xffffffff },
8390 { HOSTCC_RXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
8391 0x00000000, 0x000000ff },
8392 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_NOT_5705
,
8393 0x00000000, 0xffffffff },
8394 { HOSTCC_TXCOAL_MAXF_INT
, TG3_FL_5705
| TG3_FL_NOT_5788
,
8395 0x00000000, 0x000000ff },
8396 { HOSTCC_STAT_COAL_TICKS
, TG3_FL_NOT_5705
,
8397 0x00000000, 0xffffffff },
8398 { HOSTCC_STATS_BLK_HOST_ADDR
, TG3_FL_NOT_5705
,
8399 0x00000000, 0xffffffff },
8400 { HOSTCC_STATS_BLK_HOST_ADDR
+4, TG3_FL_NOT_5705
,
8401 0x00000000, 0xffffffff },
8402 { HOSTCC_STATUS_BLK_HOST_ADDR
, 0x0000,
8403 0x00000000, 0xffffffff },
8404 { HOSTCC_STATUS_BLK_HOST_ADDR
+4, 0x0000,
8405 0x00000000, 0xffffffff },
8406 { HOSTCC_STATS_BLK_NIC_ADDR
, 0x0000,
8407 0xffffffff, 0x00000000 },
8408 { HOSTCC_STATUS_BLK_NIC_ADDR
, 0x0000,
8409 0xffffffff, 0x00000000 },
8411 /* Buffer Manager Control Registers. */
8412 { BUFMGR_MB_POOL_ADDR
, 0x0000,
8413 0x00000000, 0x007fff80 },
8414 { BUFMGR_MB_POOL_SIZE
, 0x0000,
8415 0x00000000, 0x007fffff },
8416 { BUFMGR_MB_RDMA_LOW_WATER
, 0x0000,
8417 0x00000000, 0x0000003f },
8418 { BUFMGR_MB_MACRX_LOW_WATER
, 0x0000,
8419 0x00000000, 0x000001ff },
8420 { BUFMGR_MB_HIGH_WATER
, 0x0000,
8421 0x00000000, 0x000001ff },
8422 { BUFMGR_DMA_DESC_POOL_ADDR
, TG3_FL_NOT_5705
,
8423 0xffffffff, 0x00000000 },
8424 { BUFMGR_DMA_DESC_POOL_SIZE
, TG3_FL_NOT_5705
,
8425 0xffffffff, 0x00000000 },
8427 /* Mailbox Registers */
8428 { GRCMBOX_RCVSTD_PROD_IDX
+4, 0x0000,
8429 0x00000000, 0x000001ff },
8430 { GRCMBOX_RCVJUMBO_PROD_IDX
+4, TG3_FL_NOT_5705
,
8431 0x00000000, 0x000001ff },
8432 { GRCMBOX_RCVRET_CON_IDX_0
+4, 0x0000,
8433 0x00000000, 0x000007ff },
8434 { GRCMBOX_SNDHOST_PROD_IDX_0
+4, 0x0000,
8435 0x00000000, 0x000001ff },
8437 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
8440 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)
8445 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
8446 if (is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_NOT_5705
))
8449 if (!is_5705
&& (reg_tbl
[i
].flags
& TG3_FL_5705
))
8452 if ((tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
8453 (reg_tbl
[i
].flags
& TG3_FL_NOT_5788
))
8456 offset
= (u32
) reg_tbl
[i
].offset
;
8457 read_mask
= reg_tbl
[i
].read_mask
;
8458 write_mask
= reg_tbl
[i
].write_mask
;
8460 /* Save the original register content */
8461 save_val
= tr32(offset
);
8463 /* Determine the read-only value. */
8464 read_val
= save_val
& read_mask
;
8466 /* Write zero to the register, then make sure the read-only bits
8467 * are not changed and the read/write bits are all zeros.
8473 /* Test the read-only and read/write bits. */
8474 if (((val
& read_mask
) != read_val
) || (val
& write_mask
))
8477 /* Write ones to all the bits defined by RdMask and WrMask, then
8478 * make sure the read-only bits are not changed and the
8479 * read/write bits are all ones.
8481 tw32(offset
, read_mask
| write_mask
);
8485 /* Test the read-only bits. */
8486 if ((val
& read_mask
) != read_val
)
8489 /* Test the read/write bits. */
8490 if ((val
& write_mask
) != write_mask
)
8493 tw32(offset
, save_val
);
8499 printk(KERN_ERR PFX
"Register test failed at offset %x\n", offset
);
8500 tw32(offset
, save_val
);
8504 static int tg3_do_mem_test(struct tg3
*tp
, u32 offset
, u32 len
)
8506 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8510 for (i
= 0; i
< sizeof(test_pattern
)/sizeof(u32
); i
++) {
8511 for (j
= 0; j
< len
; j
+= 4) {
8514 tg3_write_mem(tp
, offset
+ j
, test_pattern
[i
]);
8515 tg3_read_mem(tp
, offset
+ j
, &val
);
8516 if (val
!= test_pattern
[i
])
8523 static int tg3_test_memory(struct tg3
*tp
)
8525 static struct mem_entry
{
8528 } mem_tbl_570x
[] = {
8529 { 0x00000000, 0x00b50},
8530 { 0x00002000, 0x1c000},
8531 { 0xffffffff, 0x00000}
8532 }, mem_tbl_5705
[] = {
8533 { 0x00000100, 0x0000c},
8534 { 0x00000200, 0x00008},
8535 { 0x00004000, 0x00800},
8536 { 0x00006000, 0x01000},
8537 { 0x00008000, 0x02000},
8538 { 0x00010000, 0x0e000},
8539 { 0xffffffff, 0x00000}
8540 }, mem_tbl_5755
[] = {
8541 { 0x00000200, 0x00008},
8542 { 0x00004000, 0x00800},
8543 { 0x00006000, 0x00800},
8544 { 0x00008000, 0x02000},
8545 { 0x00010000, 0x0c000},
8546 { 0xffffffff, 0x00000}
8548 struct mem_entry
*mem_tbl
;
8552 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
8553 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
8554 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
8555 mem_tbl
= mem_tbl_5755
;
8557 mem_tbl
= mem_tbl_5705
;
8559 mem_tbl
= mem_tbl_570x
;
8561 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
8562 if ((err
= tg3_do_mem_test(tp
, mem_tbl
[i
].offset
,
8563 mem_tbl
[i
].len
)) != 0)
8570 #define TG3_MAC_LOOPBACK 0
8571 #define TG3_PHY_LOOPBACK 1
8573 static int tg3_run_loopback(struct tg3
*tp
, int loopback_mode
)
8575 u32 mac_mode
, rx_start_idx
, rx_idx
, tx_idx
, opaque_key
;
8577 struct sk_buff
*skb
, *rx_skb
;
8580 int num_pkts
, tx_len
, rx_len
, i
, err
;
8581 struct tg3_rx_buffer_desc
*desc
;
8583 if (loopback_mode
== TG3_MAC_LOOPBACK
) {
8584 /* HW errata - mac loopback fails in some cases on 5780.
8585 * Normal traffic and PHY loopback are not affected by
8588 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
)
8591 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
8592 MAC_MODE_PORT_INT_LPBACK
| MAC_MODE_LINK_POLARITY
;
8593 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
8594 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
8596 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
8597 tw32(MAC_MODE
, mac_mode
);
8598 } else if (loopback_mode
== TG3_PHY_LOOPBACK
) {
8601 val
= BMCR_LOOPBACK
| BMCR_FULLDPLX
;
8602 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
8603 val
|= BMCR_SPEED100
;
8605 val
|= BMCR_SPEED1000
;
8607 tg3_writephy(tp
, MII_BMCR
, val
);
8609 /* reset to prevent losing 1st rx packet intermittently */
8610 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
) {
8611 tw32_f(MAC_RX_MODE
, RX_MODE_RESET
);
8613 tw32_f(MAC_RX_MODE
, tp
->rx_mode
);
8615 mac_mode
= (tp
->mac_mode
& ~MAC_MODE_PORT_MODE_MASK
) |
8616 MAC_MODE_LINK_POLARITY
;
8617 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
8618 mac_mode
|= MAC_MODE_PORT_MODE_MII
;
8620 mac_mode
|= MAC_MODE_PORT_MODE_GMII
;
8621 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
8622 mac_mode
&= ~MAC_MODE_LINK_POLARITY
;
8623 tg3_writephy(tp
, MII_TG3_EXT_CTRL
,
8624 MII_TG3_EXT_CTRL_LNK3_LED_MODE
);
8626 tw32(MAC_MODE
, mac_mode
);
8634 skb
= netdev_alloc_skb(tp
->dev
, tx_len
);
8638 tx_data
= skb_put(skb
, tx_len
);
8639 memcpy(tx_data
, tp
->dev
->dev_addr
, 6);
8640 memset(tx_data
+ 6, 0x0, 8);
8642 tw32(MAC_RX_MTU_SIZE
, tx_len
+ 4);
8644 for (i
= 14; i
< tx_len
; i
++)
8645 tx_data
[i
] = (u8
) (i
& 0xff);
8647 map
= pci_map_single(tp
->pdev
, skb
->data
, tx_len
, PCI_DMA_TODEVICE
);
8649 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
8654 rx_start_idx
= tp
->hw_status
->idx
[0].rx_producer
;
8658 tg3_set_txd(tp
, tp
->tx_prod
, map
, tx_len
, 0, 1);
8663 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
,
8665 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0
+ TG3_64BIT_REG_LOW
);
8669 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
8670 for (i
= 0; i
< 25; i
++) {
8671 tw32_f(HOSTCC_MODE
, tp
->coalesce_mode
| HOSTCC_MODE_ENABLE
|
8676 tx_idx
= tp
->hw_status
->idx
[0].tx_consumer
;
8677 rx_idx
= tp
->hw_status
->idx
[0].rx_producer
;
8678 if ((tx_idx
== tp
->tx_prod
) &&
8679 (rx_idx
== (rx_start_idx
+ num_pkts
)))
8683 pci_unmap_single(tp
->pdev
, map
, tx_len
, PCI_DMA_TODEVICE
);
8686 if (tx_idx
!= tp
->tx_prod
)
8689 if (rx_idx
!= rx_start_idx
+ num_pkts
)
8692 desc
= &tp
->rx_rcb
[rx_start_idx
];
8693 desc_idx
= desc
->opaque
& RXD_OPAQUE_INDEX_MASK
;
8694 opaque_key
= desc
->opaque
& RXD_OPAQUE_RING_MASK
;
8695 if (opaque_key
!= RXD_OPAQUE_RING_STD
)
8698 if ((desc
->err_vlan
& RXD_ERR_MASK
) != 0 &&
8699 (desc
->err_vlan
!= RXD_ERR_ODD_NIBBLE_RCVD_MII
))
8702 rx_len
= ((desc
->idx_len
& RXD_LEN_MASK
) >> RXD_LEN_SHIFT
) - 4;
8703 if (rx_len
!= tx_len
)
8706 rx_skb
= tp
->rx_std_buffers
[desc_idx
].skb
;
8708 map
= pci_unmap_addr(&tp
->rx_std_buffers
[desc_idx
], mapping
);
8709 pci_dma_sync_single_for_cpu(tp
->pdev
, map
, rx_len
, PCI_DMA_FROMDEVICE
);
8711 for (i
= 14; i
< tx_len
; i
++) {
8712 if (*(rx_skb
->data
+ i
) != (u8
) (i
& 0xff))
8717 /* tg3_free_rings will unmap and free the rx_skb */
8722 #define TG3_MAC_LOOPBACK_FAILED 1
8723 #define TG3_PHY_LOOPBACK_FAILED 2
8724 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
8725 TG3_PHY_LOOPBACK_FAILED)
8727 static int tg3_test_loopback(struct tg3
*tp
)
8731 if (!netif_running(tp
->dev
))
8732 return TG3_LOOPBACK_FAILED
;
8734 err
= tg3_reset_hw(tp
, 1);
8736 return TG3_LOOPBACK_FAILED
;
8738 if (tg3_run_loopback(tp
, TG3_MAC_LOOPBACK
))
8739 err
|= TG3_MAC_LOOPBACK_FAILED
;
8740 if (!(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
8741 if (tg3_run_loopback(tp
, TG3_PHY_LOOPBACK
))
8742 err
|= TG3_PHY_LOOPBACK_FAILED
;
8748 static void tg3_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
8751 struct tg3
*tp
= netdev_priv(dev
);
8753 if (tp
->link_config
.phy_is_low_power
)
8754 tg3_set_power_state(tp
, PCI_D0
);
8756 memset(data
, 0, sizeof(u64
) * TG3_NUM_TEST
);
8758 if (tg3_test_nvram(tp
) != 0) {
8759 etest
->flags
|= ETH_TEST_FL_FAILED
;
8762 if (tg3_test_link(tp
) != 0) {
8763 etest
->flags
|= ETH_TEST_FL_FAILED
;
8766 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
8767 int err
, irq_sync
= 0;
8769 if (netif_running(dev
)) {
8774 tg3_full_lock(tp
, irq_sync
);
8776 tg3_halt(tp
, RESET_KIND_SUSPEND
, 1);
8777 err
= tg3_nvram_lock(tp
);
8778 tg3_halt_cpu(tp
, RX_CPU_BASE
);
8779 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
))
8780 tg3_halt_cpu(tp
, TX_CPU_BASE
);
8782 tg3_nvram_unlock(tp
);
8784 if (tp
->tg3_flags2
& TG3_FLG2_MII_SERDES
)
8787 if (tg3_test_registers(tp
) != 0) {
8788 etest
->flags
|= ETH_TEST_FL_FAILED
;
8791 if (tg3_test_memory(tp
) != 0) {
8792 etest
->flags
|= ETH_TEST_FL_FAILED
;
8795 if ((data
[4] = tg3_test_loopback(tp
)) != 0)
8796 etest
->flags
|= ETH_TEST_FL_FAILED
;
8798 tg3_full_unlock(tp
);
8800 if (tg3_test_interrupt(tp
) != 0) {
8801 etest
->flags
|= ETH_TEST_FL_FAILED
;
8805 tg3_full_lock(tp
, 0);
8807 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
8808 if (netif_running(dev
)) {
8809 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
8810 if (!tg3_restart_hw(tp
, 1))
8811 tg3_netif_start(tp
);
8814 tg3_full_unlock(tp
);
8816 if (tp
->link_config
.phy_is_low_power
)
8817 tg3_set_power_state(tp
, PCI_D3hot
);
8821 static int tg3_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
8823 struct mii_ioctl_data
*data
= if_mii(ifr
);
8824 struct tg3
*tp
= netdev_priv(dev
);
8829 data
->phy_id
= PHY_ADDR
;
8835 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8836 break; /* We have no PHY */
8838 if (tp
->link_config
.phy_is_low_power
)
8841 spin_lock_bh(&tp
->lock
);
8842 err
= tg3_readphy(tp
, data
->reg_num
& 0x1f, &mii_regval
);
8843 spin_unlock_bh(&tp
->lock
);
8845 data
->val_out
= mii_regval
;
8851 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
8852 break; /* We have no PHY */
8854 if (!capable(CAP_NET_ADMIN
))
8857 if (tp
->link_config
.phy_is_low_power
)
8860 spin_lock_bh(&tp
->lock
);
8861 err
= tg3_writephy(tp
, data
->reg_num
& 0x1f, data
->val_in
);
8862 spin_unlock_bh(&tp
->lock
);
8873 #if TG3_VLAN_TAG_USED
8874 static void tg3_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*grp
)
8876 struct tg3
*tp
= netdev_priv(dev
);
8878 if (netif_running(dev
))
8881 tg3_full_lock(tp
, 0);
8885 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
8886 __tg3_set_rx_mode(dev
);
8888 tg3_full_unlock(tp
);
8890 if (netif_running(dev
))
8891 tg3_netif_start(tp
);
8894 static void tg3_vlan_rx_kill_vid(struct net_device
*dev
, unsigned short vid
)
8896 struct tg3
*tp
= netdev_priv(dev
);
8898 if (netif_running(dev
))
8901 tg3_full_lock(tp
, 0);
8903 tp
->vlgrp
->vlan_devices
[vid
] = NULL
;
8904 tg3_full_unlock(tp
);
8906 if (netif_running(dev
))
8907 tg3_netif_start(tp
);
8911 static int tg3_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
8913 struct tg3
*tp
= netdev_priv(dev
);
8915 memcpy(ec
, &tp
->coal
, sizeof(*ec
));
8919 static int tg3_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*ec
)
8921 struct tg3
*tp
= netdev_priv(dev
);
8922 u32 max_rxcoal_tick_int
= 0, max_txcoal_tick_int
= 0;
8923 u32 max_stat_coal_ticks
= 0, min_stat_coal_ticks
= 0;
8925 if (!(tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
)) {
8926 max_rxcoal_tick_int
= MAX_RXCOAL_TICK_INT
;
8927 max_txcoal_tick_int
= MAX_TXCOAL_TICK_INT
;
8928 max_stat_coal_ticks
= MAX_STAT_COAL_TICKS
;
8929 min_stat_coal_ticks
= MIN_STAT_COAL_TICKS
;
8932 if ((ec
->rx_coalesce_usecs
> MAX_RXCOL_TICKS
) ||
8933 (ec
->tx_coalesce_usecs
> MAX_TXCOL_TICKS
) ||
8934 (ec
->rx_max_coalesced_frames
> MAX_RXMAX_FRAMES
) ||
8935 (ec
->tx_max_coalesced_frames
> MAX_TXMAX_FRAMES
) ||
8936 (ec
->rx_coalesce_usecs_irq
> max_rxcoal_tick_int
) ||
8937 (ec
->tx_coalesce_usecs_irq
> max_txcoal_tick_int
) ||
8938 (ec
->rx_max_coalesced_frames_irq
> MAX_RXCOAL_MAXF_INT
) ||
8939 (ec
->tx_max_coalesced_frames_irq
> MAX_TXCOAL_MAXF_INT
) ||
8940 (ec
->stats_block_coalesce_usecs
> max_stat_coal_ticks
) ||
8941 (ec
->stats_block_coalesce_usecs
< min_stat_coal_ticks
))
8944 /* No rx interrupts will be generated if both are zero */
8945 if ((ec
->rx_coalesce_usecs
== 0) &&
8946 (ec
->rx_max_coalesced_frames
== 0))
8949 /* No tx interrupts will be generated if both are zero */
8950 if ((ec
->tx_coalesce_usecs
== 0) &&
8951 (ec
->tx_max_coalesced_frames
== 0))
8954 /* Only copy relevant parameters, ignore all others. */
8955 tp
->coal
.rx_coalesce_usecs
= ec
->rx_coalesce_usecs
;
8956 tp
->coal
.tx_coalesce_usecs
= ec
->tx_coalesce_usecs
;
8957 tp
->coal
.rx_max_coalesced_frames
= ec
->rx_max_coalesced_frames
;
8958 tp
->coal
.tx_max_coalesced_frames
= ec
->tx_max_coalesced_frames
;
8959 tp
->coal
.rx_coalesce_usecs_irq
= ec
->rx_coalesce_usecs_irq
;
8960 tp
->coal
.tx_coalesce_usecs_irq
= ec
->tx_coalesce_usecs_irq
;
8961 tp
->coal
.rx_max_coalesced_frames_irq
= ec
->rx_max_coalesced_frames_irq
;
8962 tp
->coal
.tx_max_coalesced_frames_irq
= ec
->tx_max_coalesced_frames_irq
;
8963 tp
->coal
.stats_block_coalesce_usecs
= ec
->stats_block_coalesce_usecs
;
8965 if (netif_running(dev
)) {
8966 tg3_full_lock(tp
, 0);
8967 __tg3_set_coalesce(tp
, &tp
->coal
);
8968 tg3_full_unlock(tp
);
8973 static const struct ethtool_ops tg3_ethtool_ops
= {
8974 .get_settings
= tg3_get_settings
,
8975 .set_settings
= tg3_set_settings
,
8976 .get_drvinfo
= tg3_get_drvinfo
,
8977 .get_regs_len
= tg3_get_regs_len
,
8978 .get_regs
= tg3_get_regs
,
8979 .get_wol
= tg3_get_wol
,
8980 .set_wol
= tg3_set_wol
,
8981 .get_msglevel
= tg3_get_msglevel
,
8982 .set_msglevel
= tg3_set_msglevel
,
8983 .nway_reset
= tg3_nway_reset
,
8984 .get_link
= ethtool_op_get_link
,
8985 .get_eeprom_len
= tg3_get_eeprom_len
,
8986 .get_eeprom
= tg3_get_eeprom
,
8987 .set_eeprom
= tg3_set_eeprom
,
8988 .get_ringparam
= tg3_get_ringparam
,
8989 .set_ringparam
= tg3_set_ringparam
,
8990 .get_pauseparam
= tg3_get_pauseparam
,
8991 .set_pauseparam
= tg3_set_pauseparam
,
8992 .get_rx_csum
= tg3_get_rx_csum
,
8993 .set_rx_csum
= tg3_set_rx_csum
,
8994 .get_tx_csum
= ethtool_op_get_tx_csum
,
8995 .set_tx_csum
= tg3_set_tx_csum
,
8996 .get_sg
= ethtool_op_get_sg
,
8997 .set_sg
= ethtool_op_set_sg
,
8998 #if TG3_TSO_SUPPORT != 0
8999 .get_tso
= ethtool_op_get_tso
,
9000 .set_tso
= tg3_set_tso
,
9002 .self_test_count
= tg3_get_test_count
,
9003 .self_test
= tg3_self_test
,
9004 .get_strings
= tg3_get_strings
,
9005 .phys_id
= tg3_phys_id
,
9006 .get_stats_count
= tg3_get_stats_count
,
9007 .get_ethtool_stats
= tg3_get_ethtool_stats
,
9008 .get_coalesce
= tg3_get_coalesce
,
9009 .set_coalesce
= tg3_set_coalesce
,
9010 .get_perm_addr
= ethtool_op_get_perm_addr
,
9013 static void __devinit
tg3_get_eeprom_size(struct tg3
*tp
)
9015 u32 cursize
, val
, magic
;
9017 tp
->nvram_size
= EEPROM_CHIP_SIZE
;
9019 if (tg3_nvram_read_swab(tp
, 0, &magic
) != 0)
9022 if ((magic
!= TG3_EEPROM_MAGIC
) && ((magic
& 0xff000000) != 0xa5000000))
9026 * Size the chip by reading offsets at increasing powers of two.
9027 * When we encounter our validation signature, we know the addressing
9028 * has wrapped around, and thus have our chip size.
9032 while (cursize
< tp
->nvram_size
) {
9033 if (tg3_nvram_read_swab(tp
, cursize
, &val
) != 0)
9042 tp
->nvram_size
= cursize
;
9045 static void __devinit
tg3_get_nvram_size(struct tg3
*tp
)
9049 if (tg3_nvram_read_swab(tp
, 0, &val
) != 0)
9052 /* Selfboot format */
9053 if (val
!= TG3_EEPROM_MAGIC
) {
9054 tg3_get_eeprom_size(tp
);
9058 if (tg3_nvram_read(tp
, 0xf0, &val
) == 0) {
9060 tp
->nvram_size
= (val
>> 16) * 1024;
9064 tp
->nvram_size
= 0x20000;
9067 static void __devinit
tg3_get_nvram_info(struct tg3
*tp
)
9071 nvcfg1
= tr32(NVRAM_CFG1
);
9072 if (nvcfg1
& NVRAM_CFG1_FLASHIF_ENAB
) {
9073 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9076 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9077 tw32(NVRAM_CFG1
, nvcfg1
);
9080 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
) ||
9081 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
9082 switch (nvcfg1
& NVRAM_CFG1_VENDOR_MASK
) {
9083 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED
:
9084 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9085 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
9086 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9088 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED
:
9089 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9090 tp
->nvram_pagesize
= ATMEL_AT25F512_PAGE_SIZE
;
9092 case FLASH_VENDOR_ATMEL_EEPROM
:
9093 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9094 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9095 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9097 case FLASH_VENDOR_ST
:
9098 tp
->nvram_jedecnum
= JEDEC_ST
;
9099 tp
->nvram_pagesize
= ST_M45PEX0_PAGE_SIZE
;
9100 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9102 case FLASH_VENDOR_SAIFUN
:
9103 tp
->nvram_jedecnum
= JEDEC_SAIFUN
;
9104 tp
->nvram_pagesize
= SAIFUN_SA25F0XX_PAGE_SIZE
;
9106 case FLASH_VENDOR_SST_SMALL
:
9107 case FLASH_VENDOR_SST_LARGE
:
9108 tp
->nvram_jedecnum
= JEDEC_SST
;
9109 tp
->nvram_pagesize
= SST_25VF0X0_PAGE_SIZE
;
9114 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9115 tp
->nvram_pagesize
= ATMEL_AT45DB0X1B_PAGE_SIZE
;
9116 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9120 static void __devinit
tg3_get_5752_nvram_info(struct tg3
*tp
)
9124 nvcfg1
= tr32(NVRAM_CFG1
);
9126 /* NVRAM protection for TPM */
9127 if (nvcfg1
& (1 << 27))
9128 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
9130 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
9131 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ
:
9132 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ
:
9133 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9134 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9136 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
9137 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9138 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9139 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9141 case FLASH_5752VENDOR_ST_M45PE10
:
9142 case FLASH_5752VENDOR_ST_M45PE20
:
9143 case FLASH_5752VENDOR_ST_M45PE40
:
9144 tp
->nvram_jedecnum
= JEDEC_ST
;
9145 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9146 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9150 if (tp
->tg3_flags2
& TG3_FLG2_FLASH
) {
9151 switch (nvcfg1
& NVRAM_CFG1_5752PAGE_SIZE_MASK
) {
9152 case FLASH_5752PAGE_SIZE_256
:
9153 tp
->nvram_pagesize
= 256;
9155 case FLASH_5752PAGE_SIZE_512
:
9156 tp
->nvram_pagesize
= 512;
9158 case FLASH_5752PAGE_SIZE_1K
:
9159 tp
->nvram_pagesize
= 1024;
9161 case FLASH_5752PAGE_SIZE_2K
:
9162 tp
->nvram_pagesize
= 2048;
9164 case FLASH_5752PAGE_SIZE_4K
:
9165 tp
->nvram_pagesize
= 4096;
9167 case FLASH_5752PAGE_SIZE_264
:
9168 tp
->nvram_pagesize
= 264;
9173 /* For eeprom, set pagesize to maximum eeprom size */
9174 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9176 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9177 tw32(NVRAM_CFG1
, nvcfg1
);
9181 static void __devinit
tg3_get_5755_nvram_info(struct tg3
*tp
)
9185 nvcfg1
= tr32(NVRAM_CFG1
);
9187 /* NVRAM protection for TPM */
9188 if (nvcfg1
& (1 << 27))
9189 tp
->tg3_flags2
|= TG3_FLG2_PROTECTED_NVRAM
;
9191 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
9192 case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ
:
9193 case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ
:
9194 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9195 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9196 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9198 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9199 tw32(NVRAM_CFG1
, nvcfg1
);
9201 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
9202 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
9203 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
9204 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
9205 case FLASH_5755VENDOR_ATMEL_FLASH_4
:
9206 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9207 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9208 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9209 tp
->nvram_pagesize
= 264;
9211 case FLASH_5752VENDOR_ST_M45PE10
:
9212 case FLASH_5752VENDOR_ST_M45PE20
:
9213 case FLASH_5752VENDOR_ST_M45PE40
:
9214 tp
->nvram_jedecnum
= JEDEC_ST
;
9215 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9216 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9217 tp
->nvram_pagesize
= 256;
9222 static void __devinit
tg3_get_5787_nvram_info(struct tg3
*tp
)
9226 nvcfg1
= tr32(NVRAM_CFG1
);
9228 switch (nvcfg1
& NVRAM_CFG1_5752VENDOR_MASK
) {
9229 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ
:
9230 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ
:
9231 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ
:
9232 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ
:
9233 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9234 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9235 tp
->nvram_pagesize
= ATMEL_AT24C512_CHIP_SIZE
;
9237 nvcfg1
&= ~NVRAM_CFG1_COMPAT_BYPASS
;
9238 tw32(NVRAM_CFG1
, nvcfg1
);
9240 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED
:
9241 case FLASH_5755VENDOR_ATMEL_FLASH_1
:
9242 case FLASH_5755VENDOR_ATMEL_FLASH_2
:
9243 case FLASH_5755VENDOR_ATMEL_FLASH_3
:
9244 tp
->nvram_jedecnum
= JEDEC_ATMEL
;
9245 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9246 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9247 tp
->nvram_pagesize
= 264;
9249 case FLASH_5752VENDOR_ST_M45PE10
:
9250 case FLASH_5752VENDOR_ST_M45PE20
:
9251 case FLASH_5752VENDOR_ST_M45PE40
:
9252 tp
->nvram_jedecnum
= JEDEC_ST
;
9253 tp
->tg3_flags
|= TG3_FLAG_NVRAM_BUFFERED
;
9254 tp
->tg3_flags2
|= TG3_FLG2_FLASH
;
9255 tp
->nvram_pagesize
= 256;
9260 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
9261 static void __devinit
tg3_nvram_init(struct tg3
*tp
)
9265 tw32_f(GRC_EEPROM_ADDR
,
9266 (EEPROM_ADDR_FSM_RESET
|
9267 (EEPROM_DEFAULT_CLOCK_PERIOD
<<
9268 EEPROM_ADDR_CLKPERD_SHIFT
)));
9270 /* XXX schedule_timeout() ... */
9271 for (j
= 0; j
< 100; j
++)
9274 /* Enable seeprom accesses. */
9275 tw32_f(GRC_LOCAL_CTRL
,
9276 tr32(GRC_LOCAL_CTRL
) | GRC_LCLCTRL_AUTO_SEEPROM
);
9279 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
9280 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) {
9281 tp
->tg3_flags
|= TG3_FLAG_NVRAM
;
9283 if (tg3_nvram_lock(tp
)) {
9284 printk(KERN_WARNING PFX
"%s: Cannot get nvarm lock, "
9285 "tg3_nvram_init failed.\n", tp
->dev
->name
);
9288 tg3_enable_nvram_access(tp
);
9290 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
9291 tg3_get_5752_nvram_info(tp
);
9292 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
9293 tg3_get_5755_nvram_info(tp
);
9294 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
9295 tg3_get_5787_nvram_info(tp
);
9297 tg3_get_nvram_info(tp
);
9299 tg3_get_nvram_size(tp
);
9301 tg3_disable_nvram_access(tp
);
9302 tg3_nvram_unlock(tp
);
9305 tp
->tg3_flags
&= ~(TG3_FLAG_NVRAM
| TG3_FLAG_NVRAM_BUFFERED
);
9307 tg3_get_eeprom_size(tp
);
9311 static int tg3_nvram_read_using_eeprom(struct tg3
*tp
,
9312 u32 offset
, u32
*val
)
9317 if (offset
> EEPROM_ADDR_ADDR_MASK
||
9321 tmp
= tr32(GRC_EEPROM_ADDR
) & ~(EEPROM_ADDR_ADDR_MASK
|
9322 EEPROM_ADDR_DEVID_MASK
|
9324 tw32(GRC_EEPROM_ADDR
,
9326 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
9327 ((offset
<< EEPROM_ADDR_ADDR_SHIFT
) &
9328 EEPROM_ADDR_ADDR_MASK
) |
9329 EEPROM_ADDR_READ
| EEPROM_ADDR_START
);
9331 for (i
= 0; i
< 10000; i
++) {
9332 tmp
= tr32(GRC_EEPROM_ADDR
);
9334 if (tmp
& EEPROM_ADDR_COMPLETE
)
9338 if (!(tmp
& EEPROM_ADDR_COMPLETE
))
9341 *val
= tr32(GRC_EEPROM_DATA
);
9345 #define NVRAM_CMD_TIMEOUT 10000
9347 static int tg3_nvram_exec_cmd(struct tg3
*tp
, u32 nvram_cmd
)
9351 tw32(NVRAM_CMD
, nvram_cmd
);
9352 for (i
= 0; i
< NVRAM_CMD_TIMEOUT
; i
++) {
9354 if (tr32(NVRAM_CMD
) & NVRAM_CMD_DONE
) {
9359 if (i
== NVRAM_CMD_TIMEOUT
) {
9365 static u32
tg3_nvram_phys_addr(struct tg3
*tp
, u32 addr
)
9367 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM
) &&
9368 (tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) &&
9369 (tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
9370 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
9372 addr
= ((addr
/ tp
->nvram_pagesize
) <<
9373 ATMEL_AT45DB0X1B_PAGE_POS
) +
9374 (addr
% tp
->nvram_pagesize
);
9379 static u32
tg3_nvram_logical_addr(struct tg3
*tp
, u32 addr
)
9381 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM
) &&
9382 (tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) &&
9383 (tp
->tg3_flags2
& TG3_FLG2_FLASH
) &&
9384 (tp
->nvram_jedecnum
== JEDEC_ATMEL
))
9386 addr
= ((addr
>> ATMEL_AT45DB0X1B_PAGE_POS
) *
9387 tp
->nvram_pagesize
) +
9388 (addr
& ((1 << ATMEL_AT45DB0X1B_PAGE_POS
) - 1));
9393 static int tg3_nvram_read(struct tg3
*tp
, u32 offset
, u32
*val
)
9397 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
))
9398 return tg3_nvram_read_using_eeprom(tp
, offset
, val
);
9400 offset
= tg3_nvram_phys_addr(tp
, offset
);
9402 if (offset
> NVRAM_ADDR_MSK
)
9405 ret
= tg3_nvram_lock(tp
);
9409 tg3_enable_nvram_access(tp
);
9411 tw32(NVRAM_ADDR
, offset
);
9412 ret
= tg3_nvram_exec_cmd(tp
, NVRAM_CMD_RD
| NVRAM_CMD_GO
|
9413 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_DONE
);
9416 *val
= swab32(tr32(NVRAM_RDDATA
));
9418 tg3_disable_nvram_access(tp
);
9420 tg3_nvram_unlock(tp
);
9425 static int tg3_nvram_read_swab(struct tg3
*tp
, u32 offset
, u32
*val
)
9430 err
= tg3_nvram_read(tp
, offset
, &tmp
);
9435 static int tg3_nvram_write_block_using_eeprom(struct tg3
*tp
,
9436 u32 offset
, u32 len
, u8
*buf
)
9441 for (i
= 0; i
< len
; i
+= 4) {
9446 memcpy(&data
, buf
+ i
, 4);
9448 tw32(GRC_EEPROM_DATA
, cpu_to_le32(data
));
9450 val
= tr32(GRC_EEPROM_ADDR
);
9451 tw32(GRC_EEPROM_ADDR
, val
| EEPROM_ADDR_COMPLETE
);
9453 val
&= ~(EEPROM_ADDR_ADDR_MASK
| EEPROM_ADDR_DEVID_MASK
|
9455 tw32(GRC_EEPROM_ADDR
, val
|
9456 (0 << EEPROM_ADDR_DEVID_SHIFT
) |
9457 (addr
& EEPROM_ADDR_ADDR_MASK
) |
9461 for (j
= 0; j
< 10000; j
++) {
9462 val
= tr32(GRC_EEPROM_ADDR
);
9464 if (val
& EEPROM_ADDR_COMPLETE
)
9468 if (!(val
& EEPROM_ADDR_COMPLETE
)) {
9477 /* offset and length are dword aligned */
9478 static int tg3_nvram_write_block_unbuffered(struct tg3
*tp
, u32 offset
, u32 len
,
9482 u32 pagesize
= tp
->nvram_pagesize
;
9483 u32 pagemask
= pagesize
- 1;
9487 tmp
= kmalloc(pagesize
, GFP_KERNEL
);
9493 u32 phy_addr
, page_off
, size
;
9495 phy_addr
= offset
& ~pagemask
;
9497 for (j
= 0; j
< pagesize
; j
+= 4) {
9498 if ((ret
= tg3_nvram_read(tp
, phy_addr
+ j
,
9499 (u32
*) (tmp
+ j
))))
9505 page_off
= offset
& pagemask
;
9512 memcpy(tmp
+ page_off
, buf
, size
);
9514 offset
= offset
+ (pagesize
- page_off
);
9516 tg3_enable_nvram_access(tp
);
9519 * Before we can erase the flash page, we need
9520 * to issue a special "write enable" command.
9522 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
9524 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
9527 /* Erase the target page */
9528 tw32(NVRAM_ADDR
, phy_addr
);
9530 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
|
9531 NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
| NVRAM_CMD_ERASE
;
9533 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
9536 /* Issue another write enable to start the write. */
9537 nvram_cmd
= NVRAM_CMD_WREN
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
9539 if (tg3_nvram_exec_cmd(tp
, nvram_cmd
))
9542 for (j
= 0; j
< pagesize
; j
+= 4) {
9545 data
= *((u32
*) (tmp
+ j
));
9546 tw32(NVRAM_WRDATA
, cpu_to_be32(data
));
9548 tw32(NVRAM_ADDR
, phy_addr
+ j
);
9550 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
|
9554 nvram_cmd
|= NVRAM_CMD_FIRST
;
9555 else if (j
== (pagesize
- 4))
9556 nvram_cmd
|= NVRAM_CMD_LAST
;
9558 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
9565 nvram_cmd
= NVRAM_CMD_WRDI
| NVRAM_CMD_GO
| NVRAM_CMD_DONE
;
9566 tg3_nvram_exec_cmd(tp
, nvram_cmd
);
9573 /* offset and length are dword aligned */
9574 static int tg3_nvram_write_block_buffered(struct tg3
*tp
, u32 offset
, u32 len
,
9579 for (i
= 0; i
< len
; i
+= 4, offset
+= 4) {
9580 u32 data
, page_off
, phy_addr
, nvram_cmd
;
9582 memcpy(&data
, buf
+ i
, 4);
9583 tw32(NVRAM_WRDATA
, cpu_to_be32(data
));
9585 page_off
= offset
% tp
->nvram_pagesize
;
9587 phy_addr
= tg3_nvram_phys_addr(tp
, offset
);
9589 tw32(NVRAM_ADDR
, phy_addr
);
9591 nvram_cmd
= NVRAM_CMD_GO
| NVRAM_CMD_DONE
| NVRAM_CMD_WR
;
9593 if ((page_off
== 0) || (i
== 0))
9594 nvram_cmd
|= NVRAM_CMD_FIRST
;
9595 if (page_off
== (tp
->nvram_pagesize
- 4))
9596 nvram_cmd
|= NVRAM_CMD_LAST
;
9599 nvram_cmd
|= NVRAM_CMD_LAST
;
9601 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
) &&
9602 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5755
) &&
9603 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5787
) &&
9604 (tp
->nvram_jedecnum
== JEDEC_ST
) &&
9605 (nvram_cmd
& NVRAM_CMD_FIRST
)) {
9607 if ((ret
= tg3_nvram_exec_cmd(tp
,
9608 NVRAM_CMD_WREN
| NVRAM_CMD_GO
|
9613 if (!(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
9614 /* We always do complete word writes to eeprom. */
9615 nvram_cmd
|= (NVRAM_CMD_FIRST
| NVRAM_CMD_LAST
);
9618 if ((ret
= tg3_nvram_exec_cmd(tp
, nvram_cmd
)))
9624 /* offset and length are dword aligned */
9625 static int tg3_nvram_write_block(struct tg3
*tp
, u32 offset
, u32 len
, u8
*buf
)
9629 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
9630 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
&
9631 ~GRC_LCLCTRL_GPIO_OUTPUT1
);
9635 if (!(tp
->tg3_flags
& TG3_FLAG_NVRAM
)) {
9636 ret
= tg3_nvram_write_block_using_eeprom(tp
, offset
, len
, buf
);
9641 ret
= tg3_nvram_lock(tp
);
9645 tg3_enable_nvram_access(tp
);
9646 if ((tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) &&
9647 !(tp
->tg3_flags2
& TG3_FLG2_PROTECTED_NVRAM
))
9648 tw32(NVRAM_WRITE1
, 0x406);
9650 grc_mode
= tr32(GRC_MODE
);
9651 tw32(GRC_MODE
, grc_mode
| GRC_MODE_NVRAM_WR_ENABLE
);
9653 if ((tp
->tg3_flags
& TG3_FLAG_NVRAM_BUFFERED
) ||
9654 !(tp
->tg3_flags2
& TG3_FLG2_FLASH
)) {
9656 ret
= tg3_nvram_write_block_buffered(tp
, offset
, len
,
9660 ret
= tg3_nvram_write_block_unbuffered(tp
, offset
, len
,
9664 grc_mode
= tr32(GRC_MODE
);
9665 tw32(GRC_MODE
, grc_mode
& ~GRC_MODE_NVRAM_WR_ENABLE
);
9667 tg3_disable_nvram_access(tp
);
9668 tg3_nvram_unlock(tp
);
9671 if (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
) {
9672 tw32_f(GRC_LOCAL_CTRL
, tp
->grc_local_ctrl
);
9679 struct subsys_tbl_ent
{
9680 u16 subsys_vendor
, subsys_devid
;
9684 static struct subsys_tbl_ent subsys_id_to_phy_id
[] = {
9685 /* Broadcom boards. */
9686 { PCI_VENDOR_ID_BROADCOM
, 0x1644, PHY_ID_BCM5401
}, /* BCM95700A6 */
9687 { PCI_VENDOR_ID_BROADCOM
, 0x0001, PHY_ID_BCM5701
}, /* BCM95701A5 */
9688 { PCI_VENDOR_ID_BROADCOM
, 0x0002, PHY_ID_BCM8002
}, /* BCM95700T6 */
9689 { PCI_VENDOR_ID_BROADCOM
, 0x0003, 0 }, /* BCM95700A9 */
9690 { PCI_VENDOR_ID_BROADCOM
, 0x0005, PHY_ID_BCM5701
}, /* BCM95701T1 */
9691 { PCI_VENDOR_ID_BROADCOM
, 0x0006, PHY_ID_BCM5701
}, /* BCM95701T8 */
9692 { PCI_VENDOR_ID_BROADCOM
, 0x0007, 0 }, /* BCM95701A7 */
9693 { PCI_VENDOR_ID_BROADCOM
, 0x0008, PHY_ID_BCM5701
}, /* BCM95701A10 */
9694 { PCI_VENDOR_ID_BROADCOM
, 0x8008, PHY_ID_BCM5701
}, /* BCM95701A12 */
9695 { PCI_VENDOR_ID_BROADCOM
, 0x0009, PHY_ID_BCM5703
}, /* BCM95703Ax1 */
9696 { PCI_VENDOR_ID_BROADCOM
, 0x8009, PHY_ID_BCM5703
}, /* BCM95703Ax2 */
9699 { PCI_VENDOR_ID_3COM
, 0x1000, PHY_ID_BCM5401
}, /* 3C996T */
9700 { PCI_VENDOR_ID_3COM
, 0x1006, PHY_ID_BCM5701
}, /* 3C996BT */
9701 { PCI_VENDOR_ID_3COM
, 0x1004, 0 }, /* 3C996SX */
9702 { PCI_VENDOR_ID_3COM
, 0x1007, PHY_ID_BCM5701
}, /* 3C1000T */
9703 { PCI_VENDOR_ID_3COM
, 0x1008, PHY_ID_BCM5701
}, /* 3C940BR01 */
9706 { PCI_VENDOR_ID_DELL
, 0x00d1, PHY_ID_BCM5401
}, /* VIPER */
9707 { PCI_VENDOR_ID_DELL
, 0x0106, PHY_ID_BCM5401
}, /* JAGUAR */
9708 { PCI_VENDOR_ID_DELL
, 0x0109, PHY_ID_BCM5411
}, /* MERLOT */
9709 { PCI_VENDOR_ID_DELL
, 0x010a, PHY_ID_BCM5411
}, /* SLIM_MERLOT */
9711 /* Compaq boards. */
9712 { PCI_VENDOR_ID_COMPAQ
, 0x007c, PHY_ID_BCM5701
}, /* BANSHEE */
9713 { PCI_VENDOR_ID_COMPAQ
, 0x009a, PHY_ID_BCM5701
}, /* BANSHEE_2 */
9714 { PCI_VENDOR_ID_COMPAQ
, 0x007d, 0 }, /* CHANGELING */
9715 { PCI_VENDOR_ID_COMPAQ
, 0x0085, PHY_ID_BCM5701
}, /* NC7780 */
9716 { PCI_VENDOR_ID_COMPAQ
, 0x0099, PHY_ID_BCM5701
}, /* NC7780_2 */
9719 { PCI_VENDOR_ID_IBM
, 0x0281, 0 } /* IBM??? */
9722 static inline struct subsys_tbl_ent
*lookup_by_subsys(struct tg3
*tp
)
9726 for (i
= 0; i
< ARRAY_SIZE(subsys_id_to_phy_id
); i
++) {
9727 if ((subsys_id_to_phy_id
[i
].subsys_vendor
==
9728 tp
->pdev
->subsystem_vendor
) &&
9729 (subsys_id_to_phy_id
[i
].subsys_devid
==
9730 tp
->pdev
->subsystem_device
))
9731 return &subsys_id_to_phy_id
[i
];
9736 static void __devinit
tg3_get_eeprom_hw_cfg(struct tg3
*tp
)
9741 /* On some early chips the SRAM cannot be accessed in D3hot state,
9742 * so need make sure we're in D0.
9744 pci_read_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
9745 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
9746 pci_write_config_word(tp
->pdev
, tp
->pm_cap
+ PCI_PM_CTRL
, pmcsr
);
9749 /* Make sure register accesses (indirect or otherwise)
9750 * will function correctly.
9752 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
9753 tp
->misc_host_ctrl
);
9755 /* The memory arbiter has to be enabled in order for SRAM accesses
9756 * to succeed. Normally on powerup the tg3 chip firmware will make
9757 * sure it is enabled, but other entities such as system netboot
9758 * code might disable it.
9760 val
= tr32(MEMARB_MODE
);
9761 tw32(MEMARB_MODE
, val
| MEMARB_MODE_ENABLE
);
9763 tp
->phy_id
= PHY_ID_INVALID
;
9764 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9766 /* Assume an onboard device by default. */
9767 tp
->tg3_flags
|= TG3_FLAG_EEPROM_WRITE_PROT
;
9769 tg3_read_mem(tp
, NIC_SRAM_DATA_SIG
, &val
);
9770 if (val
== NIC_SRAM_DATA_SIG_MAGIC
) {
9771 u32 nic_cfg
, led_cfg
;
9772 u32 nic_phy_id
, ver
, cfg2
= 0, eeprom_phy_id
;
9773 int eeprom_phy_serdes
= 0;
9775 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG
, &nic_cfg
);
9776 tp
->nic_sram_data_cfg
= nic_cfg
;
9778 tg3_read_mem(tp
, NIC_SRAM_DATA_VER
, &ver
);
9779 ver
>>= NIC_SRAM_DATA_VER_SHIFT
;
9780 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
) &&
9781 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
) &&
9782 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5703
) &&
9783 (ver
> 0) && (ver
< 0x100))
9784 tg3_read_mem(tp
, NIC_SRAM_DATA_CFG_2
, &cfg2
);
9786 if ((nic_cfg
& NIC_SRAM_DATA_CFG_PHY_TYPE_MASK
) ==
9787 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER
)
9788 eeprom_phy_serdes
= 1;
9790 tg3_read_mem(tp
, NIC_SRAM_DATA_PHY_ID
, &nic_phy_id
);
9791 if (nic_phy_id
!= 0) {
9792 u32 id1
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID1_MASK
;
9793 u32 id2
= nic_phy_id
& NIC_SRAM_DATA_PHY_ID2_MASK
;
9795 eeprom_phy_id
= (id1
>> 16) << 10;
9796 eeprom_phy_id
|= (id2
& 0xfc00) << 16;
9797 eeprom_phy_id
|= (id2
& 0x03ff) << 0;
9801 tp
->phy_id
= eeprom_phy_id
;
9802 if (eeprom_phy_serdes
) {
9803 if (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)
9804 tp
->tg3_flags2
|= TG3_FLG2_MII_SERDES
;
9806 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9809 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9810 led_cfg
= cfg2
& (NIC_SRAM_DATA_CFG_LED_MODE_MASK
|
9811 SHASTA_EXT_LED_MODE_MASK
);
9813 led_cfg
= nic_cfg
& NIC_SRAM_DATA_CFG_LED_MODE_MASK
;
9817 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1
:
9818 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9821 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2
:
9822 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
9825 case NIC_SRAM_DATA_CFG_LED_MODE_MAC
:
9826 tp
->led_ctrl
= LED_CTRL_MODE_MAC
;
9828 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
9829 * read on some older 5700/5701 bootcode.
9831 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
9833 GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
9835 tp
->led_ctrl
= LED_CTRL_MODE_PHY_1
;
9839 case SHASTA_EXT_LED_SHARED
:
9840 tp
->led_ctrl
= LED_CTRL_MODE_SHARED
;
9841 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
&&
9842 tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A1
)
9843 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
9844 LED_CTRL_MODE_PHY_2
);
9847 case SHASTA_EXT_LED_MAC
:
9848 tp
->led_ctrl
= LED_CTRL_MODE_SHASTA_MAC
;
9851 case SHASTA_EXT_LED_COMBO
:
9852 tp
->led_ctrl
= LED_CTRL_MODE_COMBO
;
9853 if (tp
->pci_chip_rev_id
!= CHIPREV_ID_5750_A0
)
9854 tp
->led_ctrl
|= (LED_CTRL_MODE_PHY_1
|
9855 LED_CTRL_MODE_PHY_2
);
9860 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
9861 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) &&
9862 tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
)
9863 tp
->led_ctrl
= LED_CTRL_MODE_PHY_2
;
9865 if (nic_cfg
& NIC_SRAM_DATA_CFG_EEPROM_WP
)
9866 tp
->tg3_flags
|= TG3_FLAG_EEPROM_WRITE_PROT
;
9868 tp
->tg3_flags
&= ~TG3_FLAG_EEPROM_WRITE_PROT
;
9870 if (nic_cfg
& NIC_SRAM_DATA_CFG_ASF_ENABLE
) {
9871 tp
->tg3_flags
|= TG3_FLAG_ENABLE_ASF
;
9872 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
)
9873 tp
->tg3_flags2
|= TG3_FLG2_ASF_NEW_HANDSHAKE
;
9875 if (nic_cfg
& NIC_SRAM_DATA_CFG_FIBER_WOL
)
9876 tp
->tg3_flags
|= TG3_FLAG_SERDES_WOL_CAP
;
9878 if (cfg2
& (1 << 17))
9879 tp
->tg3_flags2
|= TG3_FLG2_CAPACITIVE_COUPLING
;
9881 /* serdes signal pre-emphasis in register 0x590 set by */
9882 /* bootcode if bit 18 is set */
9883 if (cfg2
& (1 << 18))
9884 tp
->tg3_flags2
|= TG3_FLG2_SERDES_PREEMPHASIS
;
9888 static int __devinit
tg3_phy_probe(struct tg3
*tp
)
9890 u32 hw_phy_id_1
, hw_phy_id_2
;
9891 u32 hw_phy_id
, hw_phy_id_masked
;
9894 /* Reading the PHY ID register can conflict with ASF
9895 * firwmare access to the PHY hardware.
9898 if (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) {
9899 hw_phy_id
= hw_phy_id_masked
= PHY_ID_INVALID
;
9901 /* Now read the physical PHY_ID from the chip and verify
9902 * that it is sane. If it doesn't look good, we fall back
9903 * to either the hard-coded table based PHY_ID and failing
9904 * that the value found in the eeprom area.
9906 err
|= tg3_readphy(tp
, MII_PHYSID1
, &hw_phy_id_1
);
9907 err
|= tg3_readphy(tp
, MII_PHYSID2
, &hw_phy_id_2
);
9909 hw_phy_id
= (hw_phy_id_1
& 0xffff) << 10;
9910 hw_phy_id
|= (hw_phy_id_2
& 0xfc00) << 16;
9911 hw_phy_id
|= (hw_phy_id_2
& 0x03ff) << 0;
9913 hw_phy_id_masked
= hw_phy_id
& PHY_ID_MASK
;
9916 if (!err
&& KNOWN_PHY_ID(hw_phy_id_masked
)) {
9917 tp
->phy_id
= hw_phy_id
;
9918 if (hw_phy_id_masked
== PHY_ID_BCM8002
)
9919 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9921 tp
->tg3_flags2
&= ~TG3_FLG2_PHY_SERDES
;
9923 if (tp
->phy_id
!= PHY_ID_INVALID
) {
9924 /* Do nothing, phy ID already set up in
9925 * tg3_get_eeprom_hw_cfg().
9928 struct subsys_tbl_ent
*p
;
9930 /* No eeprom signature? Try the hardcoded
9931 * subsys device table.
9933 p
= lookup_by_subsys(tp
);
9937 tp
->phy_id
= p
->phy_id
;
9939 tp
->phy_id
== PHY_ID_BCM8002
)
9940 tp
->tg3_flags2
|= TG3_FLG2_PHY_SERDES
;
9944 if (!(tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
) &&
9945 !(tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
)) {
9946 u32 bmsr
, adv_reg
, tg3_ctrl
;
9948 tg3_readphy(tp
, MII_BMSR
, &bmsr
);
9949 if (!tg3_readphy(tp
, MII_BMSR
, &bmsr
) &&
9950 (bmsr
& BMSR_LSTATUS
))
9951 goto skip_phy_reset
;
9953 err
= tg3_phy_reset(tp
);
9957 adv_reg
= (ADVERTISE_10HALF
| ADVERTISE_10FULL
|
9958 ADVERTISE_100HALF
| ADVERTISE_100FULL
|
9959 ADVERTISE_CSMA
| ADVERTISE_PAUSE_CAP
);
9961 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)) {
9962 tg3_ctrl
= (MII_TG3_CTRL_ADV_1000_HALF
|
9963 MII_TG3_CTRL_ADV_1000_FULL
);
9964 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
9965 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
)
9966 tg3_ctrl
|= (MII_TG3_CTRL_AS_MASTER
|
9967 MII_TG3_CTRL_ENABLE_AS_MASTER
);
9970 if (!tg3_copper_is_advertising_all(tp
)) {
9971 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
9973 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
9974 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
9976 tg3_writephy(tp
, MII_BMCR
,
9977 BMCR_ANENABLE
| BMCR_ANRESTART
);
9979 tg3_phy_set_wirespeed(tp
);
9981 tg3_writephy(tp
, MII_ADVERTISE
, adv_reg
);
9982 if (!(tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
))
9983 tg3_writephy(tp
, MII_TG3_CTRL
, tg3_ctrl
);
9987 if ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
) {
9988 err
= tg3_init_5401phy_dsp(tp
);
9993 if (!err
&& ((tp
->phy_id
& PHY_ID_MASK
) == PHY_ID_BCM5401
)) {
9994 err
= tg3_init_5401phy_dsp(tp
);
9997 if (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
)
9998 tp
->link_config
.advertising
=
9999 (ADVERTISED_1000baseT_Half
|
10000 ADVERTISED_1000baseT_Full
|
10001 ADVERTISED_Autoneg
|
10003 if (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
)
10004 tp
->link_config
.advertising
&=
10005 ~(ADVERTISED_1000baseT_Half
|
10006 ADVERTISED_1000baseT_Full
);
10011 static void __devinit
tg3_read_partno(struct tg3
*tp
)
10013 unsigned char vpd_data
[256];
10017 if (tg3_nvram_read_swab(tp
, 0x0, &magic
))
10018 goto out_not_found
;
10020 if (magic
== TG3_EEPROM_MAGIC
) {
10021 for (i
= 0; i
< 256; i
+= 4) {
10024 if (tg3_nvram_read(tp
, 0x100 + i
, &tmp
))
10025 goto out_not_found
;
10027 vpd_data
[i
+ 0] = ((tmp
>> 0) & 0xff);
10028 vpd_data
[i
+ 1] = ((tmp
>> 8) & 0xff);
10029 vpd_data
[i
+ 2] = ((tmp
>> 16) & 0xff);
10030 vpd_data
[i
+ 3] = ((tmp
>> 24) & 0xff);
10035 vpd_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_VPD
);
10036 for (i
= 0; i
< 256; i
+= 4) {
10040 pci_write_config_word(tp
->pdev
, vpd_cap
+ PCI_VPD_ADDR
,
10042 while (j
++ < 100) {
10043 pci_read_config_word(tp
->pdev
, vpd_cap
+
10044 PCI_VPD_ADDR
, &tmp16
);
10045 if (tmp16
& 0x8000)
10049 if (!(tmp16
& 0x8000))
10050 goto out_not_found
;
10052 pci_read_config_dword(tp
->pdev
, vpd_cap
+ PCI_VPD_DATA
,
10054 tmp
= cpu_to_le32(tmp
);
10055 memcpy(&vpd_data
[i
], &tmp
, 4);
10059 /* Now parse and find the part number. */
10060 for (i
= 0; i
< 256; ) {
10061 unsigned char val
= vpd_data
[i
];
10064 if (val
== 0x82 || val
== 0x91) {
10067 (vpd_data
[i
+ 2] << 8)));
10072 goto out_not_found
;
10074 block_end
= (i
+ 3 +
10076 (vpd_data
[i
+ 2] << 8)));
10078 while (i
< block_end
) {
10079 if (vpd_data
[i
+ 0] == 'P' &&
10080 vpd_data
[i
+ 1] == 'N') {
10081 int partno_len
= vpd_data
[i
+ 2];
10083 if (partno_len
> 24)
10084 goto out_not_found
;
10086 memcpy(tp
->board_part_number
,
10095 /* Part number not found. */
10096 goto out_not_found
;
10100 strcpy(tp
->board_part_number
, "none");
10103 static void __devinit
tg3_read_fw_ver(struct tg3
*tp
)
10105 u32 val
, offset
, start
;
10107 if (tg3_nvram_read_swab(tp
, 0, &val
))
10110 if (val
!= TG3_EEPROM_MAGIC
)
10113 if (tg3_nvram_read_swab(tp
, 0xc, &offset
) ||
10114 tg3_nvram_read_swab(tp
, 0x4, &start
))
10117 offset
= tg3_nvram_logical_addr(tp
, offset
);
10118 if (tg3_nvram_read_swab(tp
, offset
, &val
))
10121 if ((val
& 0xfc000000) == 0x0c000000) {
10122 u32 ver_offset
, addr
;
10125 if (tg3_nvram_read_swab(tp
, offset
+ 4, &val
) ||
10126 tg3_nvram_read_swab(tp
, offset
+ 8, &ver_offset
))
10132 addr
= offset
+ ver_offset
- start
;
10133 for (i
= 0; i
< 16; i
+= 4) {
10134 if (tg3_nvram_read(tp
, addr
+ i
, &val
))
10137 val
= cpu_to_le32(val
);
10138 memcpy(tp
->fw_ver
+ i
, &val
, 4);
10143 static int __devinit
tg3_get_invariants(struct tg3
*tp
)
10145 static struct pci_device_id write_reorder_chipsets
[] = {
10146 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
10147 PCI_DEVICE_ID_AMD_FE_GATE_700C
) },
10148 { PCI_DEVICE(PCI_VENDOR_ID_AMD
,
10149 PCI_DEVICE_ID_AMD_8131_BRIDGE
) },
10150 { PCI_DEVICE(PCI_VENDOR_ID_VIA
,
10151 PCI_DEVICE_ID_VIA_8385_0
) },
10155 u32 cacheline_sz_reg
;
10156 u32 pci_state_reg
, grc_misc_cfg
;
10161 /* Force memory write invalidate off. If we leave it on,
10162 * then on 5700_BX chips we have to enable a workaround.
10163 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
10164 * to match the cacheline size. The Broadcom driver have this
10165 * workaround but turns MWI off all the times so never uses
10166 * it. This seems to suggest that the workaround is insufficient.
10168 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10169 pci_cmd
&= ~PCI_COMMAND_INVALIDATE
;
10170 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10172 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
10173 * has the register indirect write enable bit set before
10174 * we try to access any of the MMIO registers. It is also
10175 * critical that the PCI-X hw workaround situation is decided
10176 * before that as well.
10178 pci_read_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
10181 tp
->pci_chip_rev_id
= (misc_ctrl_reg
>>
10182 MISC_HOST_CTRL_CHIPREV_SHIFT
);
10184 /* Wrong chip ID in 5752 A0. This code can be removed later
10185 * as A0 is not in production.
10187 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5752_A0_HW
)
10188 tp
->pci_chip_rev_id
= CHIPREV_ID_5752_A0
;
10190 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
10191 * we need to disable memory and use config. cycles
10192 * only to access all registers. The 5702/03 chips
10193 * can mistakenly decode the special cycles from the
10194 * ICH chipsets as memory write cycles, causing corruption
10195 * of register and memory space. Only certain ICH bridges
10196 * will drive special cycles with non-zero data during the
10197 * address phase which can fall within the 5703's address
10198 * range. This is not an ICH bug as the PCI spec allows
10199 * non-zero address during special cycles. However, only
10200 * these ICH bridges are known to drive non-zero addresses
10201 * during special cycles.
10203 * Since special cycles do not cross PCI bridges, we only
10204 * enable this workaround if the 5703 is on the secondary
10205 * bus of these ICH bridges.
10207 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A1
) ||
10208 (tp
->pci_chip_rev_id
== CHIPREV_ID_5703_A2
)) {
10209 static struct tg3_dev_id
{
10213 } ich_chipsets
[] = {
10214 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AA_8
,
10216 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801AB_8
,
10218 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_11
,
10220 { PCI_VENDOR_ID_INTEL
, PCI_DEVICE_ID_INTEL_82801BA_6
,
10224 struct tg3_dev_id
*pci_id
= &ich_chipsets
[0];
10225 struct pci_dev
*bridge
= NULL
;
10227 while (pci_id
->vendor
!= 0) {
10228 bridge
= pci_get_device(pci_id
->vendor
, pci_id
->device
,
10234 if (pci_id
->rev
!= PCI_ANY_ID
) {
10237 pci_read_config_byte(bridge
, PCI_REVISION_ID
,
10239 if (rev
> pci_id
->rev
)
10242 if (bridge
->subordinate
&&
10243 (bridge
->subordinate
->number
==
10244 tp
->pdev
->bus
->number
)) {
10246 tp
->tg3_flags2
|= TG3_FLG2_ICH_WORKAROUND
;
10247 pci_dev_put(bridge
);
10253 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
10254 * DMA addresses > 40-bit. This bridge may have other additional
10255 * 57xx devices behind it in some 4-port NIC designs for example.
10256 * Any tg3 device found behind the bridge will also need the 40-bit
10259 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
||
10260 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
10261 tp
->tg3_flags2
|= TG3_FLG2_5780_CLASS
;
10262 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
10263 tp
->msi_cap
= pci_find_capability(tp
->pdev
, PCI_CAP_ID_MSI
);
10266 struct pci_dev
*bridge
= NULL
;
10269 bridge
= pci_get_device(PCI_VENDOR_ID_SERVERWORKS
,
10270 PCI_DEVICE_ID_SERVERWORKS_EPB
,
10272 if (bridge
&& bridge
->subordinate
&&
10273 (bridge
->subordinate
->number
<=
10274 tp
->pdev
->bus
->number
) &&
10275 (bridge
->subordinate
->subordinate
>=
10276 tp
->pdev
->bus
->number
)) {
10277 tp
->tg3_flags
|= TG3_FLAG_40BIT_DMA_BUG
;
10278 pci_dev_put(bridge
);
10284 /* Initialize misc host control in PCI block. */
10285 tp
->misc_host_ctrl
|= (misc_ctrl_reg
&
10286 MISC_HOST_CTRL_CHIPREV
);
10287 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
10288 tp
->misc_host_ctrl
);
10290 pci_read_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
10291 &cacheline_sz_reg
);
10293 tp
->pci_cacheline_sz
= (cacheline_sz_reg
>> 0) & 0xff;
10294 tp
->pci_lat_timer
= (cacheline_sz_reg
>> 8) & 0xff;
10295 tp
->pci_hdr_type
= (cacheline_sz_reg
>> 16) & 0xff;
10296 tp
->pci_bist
= (cacheline_sz_reg
>> 24) & 0xff;
10298 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
10299 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
10300 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
10301 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
||
10302 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
10303 tp
->tg3_flags2
|= TG3_FLG2_5750_PLUS
;
10305 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) ||
10306 (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
))
10307 tp
->tg3_flags2
|= TG3_FLG2_5705_PLUS
;
10309 if (tp
->tg3_flags2
& TG3_FLG2_5750_PLUS
) {
10310 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
10311 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
) {
10312 tp
->tg3_flags2
|= TG3_FLG2_HW_TSO_2
;
10313 tp
->tg3_flags2
|= TG3_FLG2_1SHOT_MSI
;
10315 tp
->tg3_flags2
|= TG3_FLG2_HW_TSO_1
|
10316 TG3_FLG2_HW_TSO_1_BUG
;
10317 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) ==
10319 tp
->pci_chip_rev_id
>= CHIPREV_ID_5750_C2
)
10320 tp
->tg3_flags2
&= ~TG3_FLG2_HW_TSO_1_BUG
;
10324 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5705
&&
10325 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5750
&&
10326 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5752
&&
10327 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5755
&&
10328 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5787
)
10329 tp
->tg3_flags2
|= TG3_FLG2_JUMBO_CAPABLE
;
10331 if (pci_find_capability(tp
->pdev
, PCI_CAP_ID_EXP
) != 0)
10332 tp
->tg3_flags2
|= TG3_FLG2_PCI_EXPRESS
;
10334 /* If we have an AMD 762 or VIA K8T800 chipset, write
10335 * reordering to the mailbox registers done by the host
10336 * controller can cause major troubles. We read back from
10337 * every mailbox register write to force the writes to be
10338 * posted to the chip in order.
10340 if (pci_dev_present(write_reorder_chipsets
) &&
10341 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
10342 tp
->tg3_flags
|= TG3_FLAG_MBOX_WRITE_REORDER
;
10344 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
10345 tp
->pci_lat_timer
< 64) {
10346 tp
->pci_lat_timer
= 64;
10348 cacheline_sz_reg
= ((tp
->pci_cacheline_sz
& 0xff) << 0);
10349 cacheline_sz_reg
|= ((tp
->pci_lat_timer
& 0xff) << 8);
10350 cacheline_sz_reg
|= ((tp
->pci_hdr_type
& 0xff) << 16);
10351 cacheline_sz_reg
|= ((tp
->pci_bist
& 0xff) << 24);
10353 pci_write_config_dword(tp
->pdev
, TG3PCI_CACHELINESZ
,
10357 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
10360 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0) {
10361 tp
->tg3_flags
|= TG3_FLAG_PCIX_MODE
;
10363 /* If this is a 5700 BX chipset, and we are in PCI-X
10364 * mode, enable register write workaround.
10366 * The workaround is to use indirect register accesses
10367 * for all chip writes not to mailbox registers.
10369 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
) {
10373 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
10375 /* The chip can have it's power management PCI config
10376 * space registers clobbered due to this bug.
10377 * So explicitly force the chip into D0 here.
10379 pci_read_config_dword(tp
->pdev
, TG3PCI_PM_CTRL_STAT
,
10381 pm_reg
&= ~PCI_PM_CTRL_STATE_MASK
;
10382 pm_reg
|= PCI_PM_CTRL_PME_ENABLE
| 0 /* D0 */;
10383 pci_write_config_dword(tp
->pdev
, TG3PCI_PM_CTRL_STAT
,
10386 /* Also, force SERR#/PERR# in PCI command. */
10387 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10388 pci_cmd
|= PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
;
10389 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10393 /* 5700 BX chips need to have their TX producer index mailboxes
10394 * written twice to workaround a bug.
10396 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5700_BX
)
10397 tp
->tg3_flags
|= TG3_FLAG_TXD_MBOX_HWBUG
;
10399 /* Back to back register writes can cause problems on this chip,
10400 * the workaround is to read back all reg writes except those to
10401 * mailbox regs. See tg3_write_indirect_reg32().
10403 * PCI Express 5750_A0 rev chips need this workaround too.
10405 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
10406 ((tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) &&
10407 tp
->pci_chip_rev_id
== CHIPREV_ID_5750_A0
))
10408 tp
->tg3_flags
|= TG3_FLAG_5701_REG_WRITE_BUG
;
10410 if ((pci_state_reg
& PCISTATE_BUS_SPEED_HIGH
) != 0)
10411 tp
->tg3_flags
|= TG3_FLAG_PCI_HIGH_SPEED
;
10412 if ((pci_state_reg
& PCISTATE_BUS_32BIT
) != 0)
10413 tp
->tg3_flags
|= TG3_FLAG_PCI_32BIT
;
10415 /* Chip-specific fixup from Broadcom driver */
10416 if ((tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
) &&
10417 (!(pci_state_reg
& PCISTATE_RETRY_SAME_DMA
))) {
10418 pci_state_reg
|= PCISTATE_RETRY_SAME_DMA
;
10419 pci_write_config_dword(tp
->pdev
, TG3PCI_PCISTATE
, pci_state_reg
);
10422 /* Default fast path register access methods */
10423 tp
->read32
= tg3_read32
;
10424 tp
->write32
= tg3_write32
;
10425 tp
->read32_mbox
= tg3_read32
;
10426 tp
->write32_mbox
= tg3_write32
;
10427 tp
->write32_tx_mbox
= tg3_write32
;
10428 tp
->write32_rx_mbox
= tg3_write32
;
10430 /* Various workaround register access methods */
10431 if (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
)
10432 tp
->write32
= tg3_write_indirect_reg32
;
10433 else if (tp
->tg3_flags
& TG3_FLAG_5701_REG_WRITE_BUG
)
10434 tp
->write32
= tg3_write_flush_reg32
;
10436 if ((tp
->tg3_flags
& TG3_FLAG_TXD_MBOX_HWBUG
) ||
10437 (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)) {
10438 tp
->write32_tx_mbox
= tg3_write32_tx_mbox
;
10439 if (tp
->tg3_flags
& TG3_FLAG_MBOX_WRITE_REORDER
)
10440 tp
->write32_rx_mbox
= tg3_write_flush_reg32
;
10443 if (tp
->tg3_flags2
& TG3_FLG2_ICH_WORKAROUND
) {
10444 tp
->read32
= tg3_read_indirect_reg32
;
10445 tp
->write32
= tg3_write_indirect_reg32
;
10446 tp
->read32_mbox
= tg3_read_indirect_mbox
;
10447 tp
->write32_mbox
= tg3_write_indirect_mbox
;
10448 tp
->write32_tx_mbox
= tg3_write_indirect_mbox
;
10449 tp
->write32_rx_mbox
= tg3_write_indirect_mbox
;
10454 pci_read_config_word(tp
->pdev
, PCI_COMMAND
, &pci_cmd
);
10455 pci_cmd
&= ~PCI_COMMAND_MEMORY
;
10456 pci_write_config_word(tp
->pdev
, PCI_COMMAND
, pci_cmd
);
10459 if (tp
->write32
== tg3_write_indirect_reg32
||
10460 ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) &&
10461 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10462 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
)))
10463 tp
->tg3_flags
|= TG3_FLAG_SRAM_USE_CONFIG
;
10465 /* Get eeprom hw config before calling tg3_set_power_state().
10466 * In particular, the TG3_FLAG_EEPROM_WRITE_PROT flag must be
10467 * determined before calling tg3_set_power_state() so that
10468 * we know whether or not to switch out of Vaux power.
10469 * When the flag is set, it means that GPIO1 is used for eeprom
10470 * write protect and also implies that it is a LOM where GPIOs
10471 * are not used to switch power.
10473 tg3_get_eeprom_hw_cfg(tp
);
10475 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10476 * GPIO1 driven high will bring 5700's external PHY out of reset.
10477 * It is also used as eeprom write protect on LOMs.
10479 tp
->grc_local_ctrl
= GRC_LCLCTRL_INT_ON_ATTN
| GRC_LCLCTRL_AUTO_SEEPROM
;
10480 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
10481 (tp
->tg3_flags
& TG3_FLAG_EEPROM_WRITE_PROT
))
10482 tp
->grc_local_ctrl
|= (GRC_LCLCTRL_GPIO_OE1
|
10483 GRC_LCLCTRL_GPIO_OUTPUT1
);
10484 /* Unused GPIO3 must be driven as output on 5752 because there
10485 * are no pull-up resistors on unused GPIO pins.
10487 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
)
10488 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_OE3
;
10490 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
10491 tp
->grc_local_ctrl
|= GRC_LCLCTRL_GPIO_UART_SEL
;
10493 /* Force the chip into D0. */
10494 err
= tg3_set_power_state(tp
, PCI_D0
);
10496 printk(KERN_ERR PFX
"(%s) transition to D0 failed\n",
10497 pci_name(tp
->pdev
));
10501 /* 5700 B0 chips do not support checksumming correctly due
10502 * to hardware bugs.
10504 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5700_B0
)
10505 tp
->tg3_flags
|= TG3_FLAG_BROKEN_CHECKSUMS
;
10507 /* Derive initial jumbo mode from MTU assigned in
10508 * ether_setup() via the alloc_etherdev() call
10510 if (tp
->dev
->mtu
> ETH_DATA_LEN
&&
10511 !(tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
))
10512 tp
->tg3_flags
|= TG3_FLAG_JUMBO_RING_ENABLE
;
10514 /* Determine WakeOnLan speed to use. */
10515 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
10516 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_A0
||
10517 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B0
||
10518 tp
->pci_chip_rev_id
== CHIPREV_ID_5701_B2
) {
10519 tp
->tg3_flags
&= ~(TG3_FLAG_WOL_SPEED_100MB
);
10521 tp
->tg3_flags
|= TG3_FLAG_WOL_SPEED_100MB
;
10524 /* A few boards don't want Ethernet@WireSpeed phy feature */
10525 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
) ||
10526 ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
) &&
10527 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A0
) &&
10528 (tp
->pci_chip_rev_id
!= CHIPREV_ID_5705_A1
)) ||
10529 (tp
->tg3_flags2
& TG3_FLG2_ANY_SERDES
))
10530 tp
->tg3_flags2
|= TG3_FLG2_NO_ETH_WIRE_SPEED
;
10532 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5703_AX
||
10533 GET_CHIP_REV(tp
->pci_chip_rev_id
) == CHIPREV_5704_AX
)
10534 tp
->tg3_flags2
|= TG3_FLG2_PHY_ADC_BUG
;
10535 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5704_A0
)
10536 tp
->tg3_flags2
|= TG3_FLG2_PHY_5704_A0_BUG
;
10538 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
10539 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
10540 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
10541 tp
->tg3_flags2
|= TG3_FLG2_PHY_JITTER_BUG
;
10543 tp
->tg3_flags2
|= TG3_FLG2_PHY_BER_BUG
;
10546 tp
->coalesce_mode
= 0;
10547 if (GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_AX
&&
10548 GET_CHIP_REV(tp
->pci_chip_rev_id
) != CHIPREV_5700_BX
)
10549 tp
->coalesce_mode
|= HOSTCC_MODE_32BYTE
;
10551 /* Initialize MAC MI mode, polling disabled. */
10552 tw32_f(MAC_MI_MODE
, tp
->mi_mode
);
10555 /* Initialize data/descriptor byte/word swapping. */
10556 val
= tr32(GRC_MODE
);
10557 val
&= GRC_MODE_HOST_STACKUP
;
10558 tw32(GRC_MODE
, val
| tp
->grc_mode
);
10560 tg3_switch_clocks(tp
);
10562 /* Clear this out for sanity. */
10563 tw32(TG3PCI_MEM_WIN_BASE_ADDR
, 0);
10565 pci_read_config_dword(tp
->pdev
, TG3PCI_PCISTATE
,
10567 if ((pci_state_reg
& PCISTATE_CONV_PCI_MODE
) == 0 &&
10568 (tp
->tg3_flags
& TG3_FLAG_PCIX_TARGET_HWBUG
) == 0) {
10569 u32 chiprevid
= GET_CHIP_REV_ID(tp
->misc_host_ctrl
);
10571 if (chiprevid
== CHIPREV_ID_5701_A0
||
10572 chiprevid
== CHIPREV_ID_5701_B0
||
10573 chiprevid
== CHIPREV_ID_5701_B2
||
10574 chiprevid
== CHIPREV_ID_5701_B5
) {
10575 void __iomem
*sram_base
;
10577 /* Write some dummy words into the SRAM status block
10578 * area, see if it reads back correctly. If the return
10579 * value is bad, force enable the PCIX workaround.
10581 sram_base
= tp
->regs
+ NIC_SRAM_WIN_BASE
+ NIC_SRAM_STATS_BLK
;
10583 writel(0x00000000, sram_base
);
10584 writel(0x00000000, sram_base
+ 4);
10585 writel(0xffffffff, sram_base
+ 4);
10586 if (readl(sram_base
) != 0x00000000)
10587 tp
->tg3_flags
|= TG3_FLAG_PCIX_TARGET_HWBUG
;
10592 tg3_nvram_init(tp
);
10594 grc_misc_cfg
= tr32(GRC_MISC_CFG
);
10595 grc_misc_cfg
&= GRC_MISC_CFG_BOARD_ID_MASK
;
10597 /* Broadcom's driver says that CIOBE multisplit has a bug */
10599 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
&&
10600 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5704CIOBE
) {
10601 tp
->tg3_flags
|= TG3_FLAG_SPLIT_MODE
;
10602 tp
->split_mode_max_reqs
= SPLIT_MODE_5704_MAX_REQ
;
10605 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
10606 (grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788
||
10607 grc_misc_cfg
== GRC_MISC_CFG_BOARD_ID_5788M
))
10608 tp
->tg3_flags2
|= TG3_FLG2_IS_5788
;
10610 if (!(tp
->tg3_flags2
& TG3_FLG2_IS_5788
) &&
10611 (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
))
10612 tp
->tg3_flags
|= TG3_FLAG_TAGGED_STATUS
;
10613 if (tp
->tg3_flags
& TG3_FLAG_TAGGED_STATUS
) {
10614 tp
->coalesce_mode
|= (HOSTCC_MODE_CLRTICK_RXBD
|
10615 HOSTCC_MODE_CLRTICK_TXBD
);
10617 tp
->misc_host_ctrl
|= MISC_HOST_CTRL_TAGGED_STATUS
;
10618 pci_write_config_dword(tp
->pdev
, TG3PCI_MISC_HOST_CTRL
,
10619 tp
->misc_host_ctrl
);
10622 /* these are limited to 10/100 only */
10623 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
&&
10624 (grc_misc_cfg
== 0x8000 || grc_misc_cfg
== 0x4000)) ||
10625 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
&&
10626 tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
10627 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901
||
10628 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5901_2
||
10629 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5705F
)) ||
10630 (tp
->pdev
->vendor
== PCI_VENDOR_ID_BROADCOM
&&
10631 (tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5751F
||
10632 tp
->pdev
->device
== PCI_DEVICE_ID_TIGON3_5753F
)))
10633 tp
->tg3_flags
|= TG3_FLAG_10_100_ONLY
;
10635 err
= tg3_phy_probe(tp
);
10637 printk(KERN_ERR PFX
"(%s) phy probe failed, err %d\n",
10638 pci_name(tp
->pdev
), err
);
10639 /* ... but do not return immediately ... */
10642 tg3_read_partno(tp
);
10643 tg3_read_fw_ver(tp
);
10645 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
) {
10646 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
10648 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
10649 tp
->tg3_flags
|= TG3_FLAG_USE_MI_INTERRUPT
;
10651 tp
->tg3_flags
&= ~TG3_FLAG_USE_MI_INTERRUPT
;
10654 /* 5700 {AX,BX} chips have a broken status block link
10655 * change bit implementation, so we must use the
10656 * status register in those cases.
10658 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
)
10659 tp
->tg3_flags
|= TG3_FLAG_USE_LINKCHG_REG
;
10661 tp
->tg3_flags
&= ~TG3_FLAG_USE_LINKCHG_REG
;
10663 /* The led_ctrl is set during tg3_phy_probe, here we might
10664 * have to force the link status polling mechanism based
10665 * upon subsystem IDs.
10667 if (tp
->pdev
->subsystem_vendor
== PCI_VENDOR_ID_DELL
&&
10668 !(tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)) {
10669 tp
->tg3_flags
|= (TG3_FLAG_USE_MI_INTERRUPT
|
10670 TG3_FLAG_USE_LINKCHG_REG
);
10673 /* For all SERDES we poll the MAC status register. */
10674 if (tp
->tg3_flags2
& TG3_FLG2_PHY_SERDES
)
10675 tp
->tg3_flags
|= TG3_FLAG_POLL_SERDES
;
10677 tp
->tg3_flags
&= ~TG3_FLAG_POLL_SERDES
;
10679 /* All chips before 5787 can get confused if TX buffers
10680 * straddle the 4GB address boundary in some cases.
10682 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
10683 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
10684 tp
->dev
->hard_start_xmit
= tg3_start_xmit
;
10686 tp
->dev
->hard_start_xmit
= tg3_start_xmit_dma_bug
;
10689 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
&&
10690 (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) != 0)
10693 tp
->rx_std_max_post
= TG3_RX_RING_SIZE
;
10695 /* Increment the rx prod index on the rx std ring by at most
10696 * 8 for these chips to workaround hw errata.
10698 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
||
10699 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5752
||
10700 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
)
10701 tp
->rx_std_max_post
= 8;
10703 /* By default, disable wake-on-lan. User can change this
10704 * using ETHTOOL_SWOL.
10706 tp
->tg3_flags
&= ~TG3_FLAG_WOL_ENABLE
;
10711 #ifdef CONFIG_SPARC64
10712 static int __devinit
tg3_get_macaddr_sparc(struct tg3
*tp
)
10714 struct net_device
*dev
= tp
->dev
;
10715 struct pci_dev
*pdev
= tp
->pdev
;
10716 struct pcidev_cookie
*pcp
= pdev
->sysdata
;
10719 unsigned char *addr
;
10722 addr
= of_get_property(pcp
->prom_node
, "local-mac-address",
10724 if (addr
&& len
== 6) {
10725 memcpy(dev
->dev_addr
, addr
, 6);
10726 memcpy(dev
->perm_addr
, dev
->dev_addr
, 6);
10733 static int __devinit
tg3_get_default_macaddr_sparc(struct tg3
*tp
)
10735 struct net_device
*dev
= tp
->dev
;
10737 memcpy(dev
->dev_addr
, idprom
->id_ethaddr
, 6);
10738 memcpy(dev
->perm_addr
, idprom
->id_ethaddr
, 6);
10743 static int __devinit
tg3_get_device_address(struct tg3
*tp
)
10745 struct net_device
*dev
= tp
->dev
;
10746 u32 hi
, lo
, mac_offset
;
10749 #ifdef CONFIG_SPARC64
10750 if (!tg3_get_macaddr_sparc(tp
))
10755 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
10756 (tp
->tg3_flags2
& TG3_FLG2_5780_CLASS
)) {
10757 if (tr32(TG3PCI_DUAL_MAC_CTRL
) & DUAL_MAC_CTRL_ID
)
10759 if (tg3_nvram_lock(tp
))
10760 tw32_f(NVRAM_CMD
, NVRAM_CMD_RESET
);
10762 tg3_nvram_unlock(tp
);
10765 /* First try to get it from MAC address mailbox. */
10766 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_HIGH_MBOX
, &hi
);
10767 if ((hi
>> 16) == 0x484b) {
10768 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
10769 dev
->dev_addr
[1] = (hi
>> 0) & 0xff;
10771 tg3_read_mem(tp
, NIC_SRAM_MAC_ADDR_LOW_MBOX
, &lo
);
10772 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
10773 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
10774 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
10775 dev
->dev_addr
[5] = (lo
>> 0) & 0xff;
10777 /* Some old bootcode may report a 0 MAC address in SRAM */
10778 addr_ok
= is_valid_ether_addr(&dev
->dev_addr
[0]);
10781 /* Next, try NVRAM. */
10782 if (!tg3_nvram_read(tp
, mac_offset
+ 0, &hi
) &&
10783 !tg3_nvram_read(tp
, mac_offset
+ 4, &lo
)) {
10784 dev
->dev_addr
[0] = ((hi
>> 16) & 0xff);
10785 dev
->dev_addr
[1] = ((hi
>> 24) & 0xff);
10786 dev
->dev_addr
[2] = ((lo
>> 0) & 0xff);
10787 dev
->dev_addr
[3] = ((lo
>> 8) & 0xff);
10788 dev
->dev_addr
[4] = ((lo
>> 16) & 0xff);
10789 dev
->dev_addr
[5] = ((lo
>> 24) & 0xff);
10791 /* Finally just fetch it out of the MAC control regs. */
10793 hi
= tr32(MAC_ADDR_0_HIGH
);
10794 lo
= tr32(MAC_ADDR_0_LOW
);
10796 dev
->dev_addr
[5] = lo
& 0xff;
10797 dev
->dev_addr
[4] = (lo
>> 8) & 0xff;
10798 dev
->dev_addr
[3] = (lo
>> 16) & 0xff;
10799 dev
->dev_addr
[2] = (lo
>> 24) & 0xff;
10800 dev
->dev_addr
[1] = hi
& 0xff;
10801 dev
->dev_addr
[0] = (hi
>> 8) & 0xff;
10805 if (!is_valid_ether_addr(&dev
->dev_addr
[0])) {
10806 #ifdef CONFIG_SPARC64
10807 if (!tg3_get_default_macaddr_sparc(tp
))
10812 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
10816 #define BOUNDARY_SINGLE_CACHELINE 1
10817 #define BOUNDARY_MULTI_CACHELINE 2
10819 static u32 __devinit
tg3_calc_dma_bndry(struct tg3
*tp
, u32 val
)
10821 int cacheline_size
;
10825 pci_read_config_byte(tp
->pdev
, PCI_CACHE_LINE_SIZE
, &byte
);
10827 cacheline_size
= 1024;
10829 cacheline_size
= (int) byte
* 4;
10831 /* On 5703 and later chips, the boundary bits have no
10834 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
10835 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
&&
10836 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
))
10839 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
10840 goal
= BOUNDARY_MULTI_CACHELINE
;
10842 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
10843 goal
= BOUNDARY_SINGLE_CACHELINE
;
10852 /* PCI controllers on most RISC systems tend to disconnect
10853 * when a device tries to burst across a cache-line boundary.
10854 * Therefore, letting tg3 do so just wastes PCI bandwidth.
10856 * Unfortunately, for PCI-E there are only limited
10857 * write-side controls for this, and thus for reads
10858 * we will still get the disconnects. We'll also waste
10859 * these PCI cycles for both read and write for chips
10860 * other than 5700 and 5701 which do not implement the
10863 if ((tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) &&
10864 !(tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
)) {
10865 switch (cacheline_size
) {
10870 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10871 val
|= (DMA_RWCTRL_READ_BNDRY_128_PCIX
|
10872 DMA_RWCTRL_WRITE_BNDRY_128_PCIX
);
10874 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
10875 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
10880 val
|= (DMA_RWCTRL_READ_BNDRY_256_PCIX
|
10881 DMA_RWCTRL_WRITE_BNDRY_256_PCIX
);
10885 val
|= (DMA_RWCTRL_READ_BNDRY_384_PCIX
|
10886 DMA_RWCTRL_WRITE_BNDRY_384_PCIX
);
10889 } else if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
10890 switch (cacheline_size
) {
10894 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10895 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
10896 val
|= DMA_RWCTRL_WRITE_BNDRY_64_PCIE
;
10902 val
&= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE
;
10903 val
|= DMA_RWCTRL_WRITE_BNDRY_128_PCIE
;
10907 switch (cacheline_size
) {
10909 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10910 val
|= (DMA_RWCTRL_READ_BNDRY_16
|
10911 DMA_RWCTRL_WRITE_BNDRY_16
);
10916 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10917 val
|= (DMA_RWCTRL_READ_BNDRY_32
|
10918 DMA_RWCTRL_WRITE_BNDRY_32
);
10923 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10924 val
|= (DMA_RWCTRL_READ_BNDRY_64
|
10925 DMA_RWCTRL_WRITE_BNDRY_64
);
10930 if (goal
== BOUNDARY_SINGLE_CACHELINE
) {
10931 val
|= (DMA_RWCTRL_READ_BNDRY_128
|
10932 DMA_RWCTRL_WRITE_BNDRY_128
);
10937 val
|= (DMA_RWCTRL_READ_BNDRY_256
|
10938 DMA_RWCTRL_WRITE_BNDRY_256
);
10941 val
|= (DMA_RWCTRL_READ_BNDRY_512
|
10942 DMA_RWCTRL_WRITE_BNDRY_512
);
10946 val
|= (DMA_RWCTRL_READ_BNDRY_1024
|
10947 DMA_RWCTRL_WRITE_BNDRY_1024
);
10956 static int __devinit
tg3_do_test_dma(struct tg3
*tp
, u32
*buf
, dma_addr_t buf_dma
, int size
, int to_device
)
10958 struct tg3_internal_buffer_desc test_desc
;
10959 u32 sram_dma_descs
;
10962 sram_dma_descs
= NIC_SRAM_DMA_DESC_POOL_BASE
;
10964 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
, 0);
10965 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
, 0);
10966 tw32(RDMAC_STATUS
, 0);
10967 tw32(WDMAC_STATUS
, 0);
10969 tw32(BUFMGR_MODE
, 0);
10970 tw32(FTQ_RESET
, 0);
10972 test_desc
.addr_hi
= ((u64
) buf_dma
) >> 32;
10973 test_desc
.addr_lo
= buf_dma
& 0xffffffff;
10974 test_desc
.nic_mbuf
= 0x00002100;
10975 test_desc
.len
= size
;
10978 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
10979 * the *second* time the tg3 driver was getting loaded after an
10982 * Broadcom tells me:
10983 * ...the DMA engine is connected to the GRC block and a DMA
10984 * reset may affect the GRC block in some unpredictable way...
10985 * The behavior of resets to individual blocks has not been tested.
10987 * Broadcom noted the GRC reset will also reset all sub-components.
10990 test_desc
.cqid_sqid
= (13 << 8) | 2;
10992 tw32_f(RDMAC_MODE
, RDMAC_MODE_ENABLE
);
10995 test_desc
.cqid_sqid
= (16 << 8) | 7;
10997 tw32_f(WDMAC_MODE
, WDMAC_MODE_ENABLE
);
11000 test_desc
.flags
= 0x00000005;
11002 for (i
= 0; i
< (sizeof(test_desc
) / sizeof(u32
)); i
++) {
11005 val
= *(((u32
*)&test_desc
) + i
);
11006 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
,
11007 sram_dma_descs
+ (i
* sizeof(u32
)));
11008 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_DATA
, val
);
11010 pci_write_config_dword(tp
->pdev
, TG3PCI_MEM_WIN_BASE_ADDR
, 0);
11013 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ
, sram_dma_descs
);
11015 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ
, sram_dma_descs
);
11019 for (i
= 0; i
< 40; i
++) {
11023 val
= tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ
);
11025 val
= tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ
);
11026 if ((val
& 0xffff) == sram_dma_descs
) {
11037 #define TEST_BUFFER_SIZE 0x2000
11039 static int __devinit
tg3_test_dma(struct tg3
*tp
)
11041 dma_addr_t buf_dma
;
11042 u32
*buf
, saved_dma_rwctrl
;
11045 buf
= pci_alloc_consistent(tp
->pdev
, TEST_BUFFER_SIZE
, &buf_dma
);
11051 tp
->dma_rwctrl
= ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT
) |
11052 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT
));
11054 tp
->dma_rwctrl
= tg3_calc_dma_bndry(tp
, tp
->dma_rwctrl
);
11056 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
11057 /* DMA read watermark not used on PCIE */
11058 tp
->dma_rwctrl
|= 0x00180000;
11059 } else if (!(tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
)) {
11060 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5705
||
11061 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5750
)
11062 tp
->dma_rwctrl
|= 0x003f0000;
11064 tp
->dma_rwctrl
|= 0x003f000f;
11066 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
11067 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) {
11068 u32 ccval
= (tr32(TG3PCI_CLOCK_CTRL
) & 0x1f);
11070 /* If the 5704 is behind the EPB bridge, we can
11071 * do the less restrictive ONE_DMA workaround for
11072 * better performance.
11074 if ((tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) &&
11075 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
11076 tp
->dma_rwctrl
|= 0x8000;
11077 else if (ccval
== 0x6 || ccval
== 0x7)
11078 tp
->dma_rwctrl
|= DMA_RWCTRL_ONE_DMA
;
11080 /* Set bit 23 to enable PCIX hw bug fix */
11081 tp
->dma_rwctrl
|= 0x009f0000;
11082 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5780
) {
11083 /* 5780 always in PCIX mode */
11084 tp
->dma_rwctrl
|= 0x00144000;
11085 } else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
) {
11086 /* 5714 always in PCIX mode */
11087 tp
->dma_rwctrl
|= 0x00148000;
11089 tp
->dma_rwctrl
|= 0x001b000f;
11093 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5703
||
11094 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
)
11095 tp
->dma_rwctrl
&= 0xfffffff0;
11097 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
11098 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
) {
11099 /* Remove this if it causes problems for some boards. */
11100 tp
->dma_rwctrl
|= DMA_RWCTRL_USE_MEM_READ_MULT
;
11102 /* On 5700/5701 chips, we need to set this bit.
11103 * Otherwise the chip will issue cacheline transactions
11104 * to streamable DMA memory with not all the byte
11105 * enables turned on. This is an error on several
11106 * RISC PCI controllers, in particular sparc64.
11108 * On 5703/5704 chips, this bit has been reassigned
11109 * a different meaning. In particular, it is used
11110 * on those chips to enable a PCI-X workaround.
11112 tp
->dma_rwctrl
|= DMA_RWCTRL_ASSERT_ALL_BE
;
11115 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
11118 /* Unneeded, already done by tg3_get_invariants. */
11119 tg3_switch_clocks(tp
);
11123 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5700
&&
11124 GET_ASIC_REV(tp
->pci_chip_rev_id
) != ASIC_REV_5701
)
11127 /* It is best to perform DMA test with maximum write burst size
11128 * to expose the 5700/5701 write DMA bug.
11130 saved_dma_rwctrl
= tp
->dma_rwctrl
;
11131 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
11132 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
11137 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++)
11140 /* Send the buffer to the chip. */
11141 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 1);
11143 printk(KERN_ERR
"tg3_test_dma() Write the buffer failed %d\n", ret
);
11148 /* validate data reached card RAM correctly. */
11149 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
11151 tg3_read_mem(tp
, 0x2100 + (i
*4), &val
);
11152 if (le32_to_cpu(val
) != p
[i
]) {
11153 printk(KERN_ERR
" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val
, i
);
11154 /* ret = -ENODEV here? */
11159 /* Now read it back. */
11160 ret
= tg3_do_test_dma(tp
, buf
, buf_dma
, TEST_BUFFER_SIZE
, 0);
11162 printk(KERN_ERR
"tg3_test_dma() Read the buffer failed %d\n", ret
);
11168 for (i
= 0; i
< TEST_BUFFER_SIZE
/ sizeof(u32
); i
++) {
11172 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
11173 DMA_RWCTRL_WRITE_BNDRY_16
) {
11174 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
11175 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
11176 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
11179 printk(KERN_ERR
"tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p
[i
], i
);
11185 if (i
== (TEST_BUFFER_SIZE
/ sizeof(u32
))) {
11191 if ((tp
->dma_rwctrl
& DMA_RWCTRL_WRITE_BNDRY_MASK
) !=
11192 DMA_RWCTRL_WRITE_BNDRY_16
) {
11193 static struct pci_device_id dma_wait_state_chipsets
[] = {
11194 { PCI_DEVICE(PCI_VENDOR_ID_APPLE
,
11195 PCI_DEVICE_ID_APPLE_UNI_N_PCI15
) },
11199 /* DMA test passed without adjusting DMA boundary,
11200 * now look for chipsets that are known to expose the
11201 * DMA bug without failing the test.
11203 if (pci_dev_present(dma_wait_state_chipsets
)) {
11204 tp
->dma_rwctrl
&= ~DMA_RWCTRL_WRITE_BNDRY_MASK
;
11205 tp
->dma_rwctrl
|= DMA_RWCTRL_WRITE_BNDRY_16
;
11208 /* Safe to use the calculated DMA boundary. */
11209 tp
->dma_rwctrl
= saved_dma_rwctrl
;
11211 tw32(TG3PCI_DMA_RW_CTRL
, tp
->dma_rwctrl
);
11215 pci_free_consistent(tp
->pdev
, TEST_BUFFER_SIZE
, buf
, buf_dma
);
11220 static void __devinit
tg3_init_link_config(struct tg3
*tp
)
11222 tp
->link_config
.advertising
=
11223 (ADVERTISED_10baseT_Half
| ADVERTISED_10baseT_Full
|
11224 ADVERTISED_100baseT_Half
| ADVERTISED_100baseT_Full
|
11225 ADVERTISED_1000baseT_Half
| ADVERTISED_1000baseT_Full
|
11226 ADVERTISED_Autoneg
| ADVERTISED_MII
);
11227 tp
->link_config
.speed
= SPEED_INVALID
;
11228 tp
->link_config
.duplex
= DUPLEX_INVALID
;
11229 tp
->link_config
.autoneg
= AUTONEG_ENABLE
;
11230 tp
->link_config
.active_speed
= SPEED_INVALID
;
11231 tp
->link_config
.active_duplex
= DUPLEX_INVALID
;
11232 tp
->link_config
.phy_is_low_power
= 0;
11233 tp
->link_config
.orig_speed
= SPEED_INVALID
;
11234 tp
->link_config
.orig_duplex
= DUPLEX_INVALID
;
11235 tp
->link_config
.orig_autoneg
= AUTONEG_INVALID
;
11238 static void __devinit
tg3_init_bufmgr_config(struct tg3
*tp
)
11240 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
11241 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
11242 DEFAULT_MB_RDMA_LOW_WATER_5705
;
11243 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
11244 DEFAULT_MB_MACRX_LOW_WATER_5705
;
11245 tp
->bufmgr_config
.mbuf_high_water
=
11246 DEFAULT_MB_HIGH_WATER_5705
;
11248 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
11249 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780
;
11250 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
11251 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780
;
11252 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
11253 DEFAULT_MB_HIGH_WATER_JUMBO_5780
;
11255 tp
->bufmgr_config
.mbuf_read_dma_low_water
=
11256 DEFAULT_MB_RDMA_LOW_WATER
;
11257 tp
->bufmgr_config
.mbuf_mac_rx_low_water
=
11258 DEFAULT_MB_MACRX_LOW_WATER
;
11259 tp
->bufmgr_config
.mbuf_high_water
=
11260 DEFAULT_MB_HIGH_WATER
;
11262 tp
->bufmgr_config
.mbuf_read_dma_low_water_jumbo
=
11263 DEFAULT_MB_RDMA_LOW_WATER_JUMBO
;
11264 tp
->bufmgr_config
.mbuf_mac_rx_low_water_jumbo
=
11265 DEFAULT_MB_MACRX_LOW_WATER_JUMBO
;
11266 tp
->bufmgr_config
.mbuf_high_water_jumbo
=
11267 DEFAULT_MB_HIGH_WATER_JUMBO
;
11270 tp
->bufmgr_config
.dma_low_water
= DEFAULT_DMA_LOW_WATER
;
11271 tp
->bufmgr_config
.dma_high_water
= DEFAULT_DMA_HIGH_WATER
;
11274 static char * __devinit
tg3_phy_string(struct tg3
*tp
)
11276 switch (tp
->phy_id
& PHY_ID_MASK
) {
11277 case PHY_ID_BCM5400
: return "5400";
11278 case PHY_ID_BCM5401
: return "5401";
11279 case PHY_ID_BCM5411
: return "5411";
11280 case PHY_ID_BCM5701
: return "5701";
11281 case PHY_ID_BCM5703
: return "5703";
11282 case PHY_ID_BCM5704
: return "5704";
11283 case PHY_ID_BCM5705
: return "5705";
11284 case PHY_ID_BCM5750
: return "5750";
11285 case PHY_ID_BCM5752
: return "5752";
11286 case PHY_ID_BCM5714
: return "5714";
11287 case PHY_ID_BCM5780
: return "5780";
11288 case PHY_ID_BCM5755
: return "5755";
11289 case PHY_ID_BCM5787
: return "5787";
11290 case PHY_ID_BCM5756
: return "5722/5756";
11291 case PHY_ID_BCM8002
: return "8002/serdes";
11292 case 0: return "serdes";
11293 default: return "unknown";
11297 static char * __devinit
tg3_bus_string(struct tg3
*tp
, char *str
)
11299 if (tp
->tg3_flags2
& TG3_FLG2_PCI_EXPRESS
) {
11300 strcpy(str
, "PCI Express");
11302 } else if (tp
->tg3_flags
& TG3_FLAG_PCIX_MODE
) {
11303 u32 clock_ctrl
= tr32(TG3PCI_CLOCK_CTRL
) & 0x1f;
11305 strcpy(str
, "PCIX:");
11307 if ((clock_ctrl
== 7) ||
11308 ((tr32(GRC_MISC_CFG
) & GRC_MISC_CFG_BOARD_ID_MASK
) ==
11309 GRC_MISC_CFG_BOARD_ID_5704CIOBE
))
11310 strcat(str
, "133MHz");
11311 else if (clock_ctrl
== 0)
11312 strcat(str
, "33MHz");
11313 else if (clock_ctrl
== 2)
11314 strcat(str
, "50MHz");
11315 else if (clock_ctrl
== 4)
11316 strcat(str
, "66MHz");
11317 else if (clock_ctrl
== 6)
11318 strcat(str
, "100MHz");
11320 strcpy(str
, "PCI:");
11321 if (tp
->tg3_flags
& TG3_FLAG_PCI_HIGH_SPEED
)
11322 strcat(str
, "66MHz");
11324 strcat(str
, "33MHz");
11326 if (tp
->tg3_flags
& TG3_FLAG_PCI_32BIT
)
11327 strcat(str
, ":32-bit");
11329 strcat(str
, ":64-bit");
11333 static struct pci_dev
* __devinit
tg3_find_peer(struct tg3
*tp
)
11335 struct pci_dev
*peer
;
11336 unsigned int func
, devnr
= tp
->pdev
->devfn
& ~7;
11338 for (func
= 0; func
< 8; func
++) {
11339 peer
= pci_get_slot(tp
->pdev
->bus
, devnr
| func
);
11340 if (peer
&& peer
!= tp
->pdev
)
11344 /* 5704 can be configured in single-port mode, set peer to
11345 * tp->pdev in that case.
11353 * We don't need to keep the refcount elevated; there's no way
11354 * to remove one half of this device without removing the other
11361 static void __devinit
tg3_init_coal(struct tg3
*tp
)
11363 struct ethtool_coalesce
*ec
= &tp
->coal
;
11365 memset(ec
, 0, sizeof(*ec
));
11366 ec
->cmd
= ETHTOOL_GCOALESCE
;
11367 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS
;
11368 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS
;
11369 ec
->rx_max_coalesced_frames
= LOW_RXMAX_FRAMES
;
11370 ec
->tx_max_coalesced_frames
= LOW_TXMAX_FRAMES
;
11371 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT
;
11372 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT
;
11373 ec
->rx_max_coalesced_frames_irq
= DEFAULT_RXCOAL_MAXF_INT
;
11374 ec
->tx_max_coalesced_frames_irq
= DEFAULT_TXCOAL_MAXF_INT
;
11375 ec
->stats_block_coalesce_usecs
= DEFAULT_STAT_COAL_TICKS
;
11377 if (tp
->coalesce_mode
& (HOSTCC_MODE_CLRTICK_RXBD
|
11378 HOSTCC_MODE_CLRTICK_TXBD
)) {
11379 ec
->rx_coalesce_usecs
= LOW_RXCOL_TICKS_CLRTCKS
;
11380 ec
->rx_coalesce_usecs_irq
= DEFAULT_RXCOAL_TICK_INT_CLRTCKS
;
11381 ec
->tx_coalesce_usecs
= LOW_TXCOL_TICKS_CLRTCKS
;
11382 ec
->tx_coalesce_usecs_irq
= DEFAULT_TXCOAL_TICK_INT_CLRTCKS
;
11385 if (tp
->tg3_flags2
& TG3_FLG2_5705_PLUS
) {
11386 ec
->rx_coalesce_usecs_irq
= 0;
11387 ec
->tx_coalesce_usecs_irq
= 0;
11388 ec
->stats_block_coalesce_usecs
= 0;
11392 static int __devinit
tg3_init_one(struct pci_dev
*pdev
,
11393 const struct pci_device_id
*ent
)
11395 static int tg3_version_printed
= 0;
11396 unsigned long tg3reg_base
, tg3reg_len
;
11397 struct net_device
*dev
;
11399 int i
, err
, pm_cap
;
11401 u64 dma_mask
, persist_dma_mask
;
11403 if (tg3_version_printed
++ == 0)
11404 printk(KERN_INFO
"%s", version
);
11406 err
= pci_enable_device(pdev
);
11408 printk(KERN_ERR PFX
"Cannot enable PCI device, "
11413 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
11414 printk(KERN_ERR PFX
"Cannot find proper PCI device "
11415 "base address, aborting.\n");
11417 goto err_out_disable_pdev
;
11420 err
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
11422 printk(KERN_ERR PFX
"Cannot obtain PCI resources, "
11424 goto err_out_disable_pdev
;
11427 pci_set_master(pdev
);
11429 /* Find power-management capability. */
11430 pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
11432 printk(KERN_ERR PFX
"Cannot find PowerManagement capability, "
11435 goto err_out_free_res
;
11438 tg3reg_base
= pci_resource_start(pdev
, 0);
11439 tg3reg_len
= pci_resource_len(pdev
, 0);
11441 dev
= alloc_etherdev(sizeof(*tp
));
11443 printk(KERN_ERR PFX
"Etherdev alloc failed, aborting.\n");
11445 goto err_out_free_res
;
11448 SET_MODULE_OWNER(dev
);
11449 SET_NETDEV_DEV(dev
, &pdev
->dev
);
11451 #if TG3_VLAN_TAG_USED
11452 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
11453 dev
->vlan_rx_register
= tg3_vlan_rx_register
;
11454 dev
->vlan_rx_kill_vid
= tg3_vlan_rx_kill_vid
;
11457 tp
= netdev_priv(dev
);
11460 tp
->pm_cap
= pm_cap
;
11461 tp
->mac_mode
= TG3_DEF_MAC_MODE
;
11462 tp
->rx_mode
= TG3_DEF_RX_MODE
;
11463 tp
->tx_mode
= TG3_DEF_TX_MODE
;
11464 tp
->mi_mode
= MAC_MI_MODE_BASE
;
11466 tp
->msg_enable
= tg3_debug
;
11468 tp
->msg_enable
= TG3_DEF_MSG_ENABLE
;
11470 /* The word/byte swap controls here control register access byte
11471 * swapping. DMA data byte swapping is controlled in the GRC_MODE
11474 tp
->misc_host_ctrl
=
11475 MISC_HOST_CTRL_MASK_PCI_INT
|
11476 MISC_HOST_CTRL_WORD_SWAP
|
11477 MISC_HOST_CTRL_INDIR_ACCESS
|
11478 MISC_HOST_CTRL_PCISTATE_RW
;
11480 /* The NONFRM (non-frame) byte/word swap controls take effect
11481 * on descriptor entries, anything which isn't packet data.
11483 * The StrongARM chips on the board (one for tx, one for rx)
11484 * are running in big-endian mode.
11486 tp
->grc_mode
= (GRC_MODE_WSWAP_DATA
| GRC_MODE_BSWAP_DATA
|
11487 GRC_MODE_WSWAP_NONFRM_DATA
);
11488 #ifdef __BIG_ENDIAN
11489 tp
->grc_mode
|= GRC_MODE_BSWAP_NONFRM_DATA
;
11491 spin_lock_init(&tp
->lock
);
11492 spin_lock_init(&tp
->indirect_lock
);
11493 INIT_WORK(&tp
->reset_task
, tg3_reset_task
, tp
);
11495 tp
->regs
= ioremap_nocache(tg3reg_base
, tg3reg_len
);
11496 if (tp
->regs
== 0UL) {
11497 printk(KERN_ERR PFX
"Cannot map device registers, "
11500 goto err_out_free_dev
;
11503 tg3_init_link_config(tp
);
11505 tp
->rx_pending
= TG3_DEF_RX_RING_PENDING
;
11506 tp
->rx_jumbo_pending
= TG3_DEF_RX_JUMBO_RING_PENDING
;
11507 tp
->tx_pending
= TG3_DEF_TX_RING_PENDING
;
11509 dev
->open
= tg3_open
;
11510 dev
->stop
= tg3_close
;
11511 dev
->get_stats
= tg3_get_stats
;
11512 dev
->set_multicast_list
= tg3_set_rx_mode
;
11513 dev
->set_mac_address
= tg3_set_mac_addr
;
11514 dev
->do_ioctl
= tg3_ioctl
;
11515 dev
->tx_timeout
= tg3_tx_timeout
;
11516 dev
->poll
= tg3_poll
;
11517 dev
->ethtool_ops
= &tg3_ethtool_ops
;
11519 dev
->watchdog_timeo
= TG3_TX_TIMEOUT
;
11520 dev
->change_mtu
= tg3_change_mtu
;
11521 dev
->irq
= pdev
->irq
;
11522 #ifdef CONFIG_NET_POLL_CONTROLLER
11523 dev
->poll_controller
= tg3_poll_controller
;
11526 err
= tg3_get_invariants(tp
);
11528 printk(KERN_ERR PFX
"Problem fetching invariants of chip, "
11530 goto err_out_iounmap
;
11533 /* The EPB bridge inside 5714, 5715, and 5780 and any
11534 * device behind the EPB cannot support DMA addresses > 40-bit.
11535 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11536 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11537 * do DMA address check in tg3_start_xmit().
11539 if (tp
->tg3_flags2
& TG3_FLG2_IS_5788
)
11540 persist_dma_mask
= dma_mask
= DMA_32BIT_MASK
;
11541 else if (tp
->tg3_flags
& TG3_FLAG_40BIT_DMA_BUG
) {
11542 persist_dma_mask
= dma_mask
= DMA_40BIT_MASK
;
11543 #ifdef CONFIG_HIGHMEM
11544 dma_mask
= DMA_64BIT_MASK
;
11547 persist_dma_mask
= dma_mask
= DMA_64BIT_MASK
;
11549 /* Configure DMA attributes. */
11550 if (dma_mask
> DMA_32BIT_MASK
) {
11551 err
= pci_set_dma_mask(pdev
, dma_mask
);
11553 dev
->features
|= NETIF_F_HIGHDMA
;
11554 err
= pci_set_consistent_dma_mask(pdev
,
11557 printk(KERN_ERR PFX
"Unable to obtain 64 bit "
11558 "DMA for consistent allocations\n");
11559 goto err_out_iounmap
;
11563 if (err
|| dma_mask
== DMA_32BIT_MASK
) {
11564 err
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
);
11566 printk(KERN_ERR PFX
"No usable DMA configuration, "
11568 goto err_out_iounmap
;
11572 tg3_init_bufmgr_config(tp
);
11574 #if TG3_TSO_SUPPORT != 0
11575 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
11576 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
11578 else if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5700
||
11579 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5701
||
11580 tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A0
||
11581 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0) {
11582 tp
->tg3_flags2
&= ~TG3_FLG2_TSO_CAPABLE
;
11584 tp
->tg3_flags2
|= TG3_FLG2_TSO_CAPABLE
;
11587 /* TSO is on by default on chips that support hardware TSO.
11588 * Firmware TSO on older chips gives lower performance, so it
11589 * is off by default, but can be enabled using ethtool.
11591 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO
) {
11592 dev
->features
|= NETIF_F_TSO
;
11593 if (tp
->tg3_flags2
& TG3_FLG2_HW_TSO_2
)
11594 dev
->features
|= NETIF_F_TSO6
;
11599 if (tp
->pci_chip_rev_id
== CHIPREV_ID_5705_A1
&&
11600 !(tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) &&
11601 !(tr32(TG3PCI_PCISTATE
) & PCISTATE_BUS_SPEED_HIGH
)) {
11602 tp
->tg3_flags2
|= TG3_FLG2_MAX_RXPEND_64
;
11603 tp
->rx_pending
= 63;
11606 if ((GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5704
) ||
11607 (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5714
))
11608 tp
->pdev_peer
= tg3_find_peer(tp
);
11610 err
= tg3_get_device_address(tp
);
11612 printk(KERN_ERR PFX
"Could not obtain valid ethernet address, "
11614 goto err_out_iounmap
;
11618 * Reset chip in case UNDI or EFI driver did not shutdown
11619 * DMA self test will enable WDMAC and we'll see (spurious)
11620 * pending DMA on the PCI bus at that point.
11622 if ((tr32(HOSTCC_MODE
) & HOSTCC_MODE_ENABLE
) ||
11623 (tr32(WDMAC_MODE
) & WDMAC_MODE_ENABLE
)) {
11624 pci_save_state(tp
->pdev
);
11625 tw32(MEMARB_MODE
, MEMARB_MODE_ENABLE
);
11626 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11629 err
= tg3_test_dma(tp
);
11631 printk(KERN_ERR PFX
"DMA engine test failed, aborting.\n");
11632 goto err_out_iounmap
;
11635 /* Tigon3 can do ipv4 only... and some chips have buggy
11638 if ((tp
->tg3_flags
& TG3_FLAG_BROKEN_CHECKSUMS
) == 0) {
11639 if (GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5755
||
11640 GET_ASIC_REV(tp
->pci_chip_rev_id
) == ASIC_REV_5787
)
11641 dev
->features
|= NETIF_F_HW_CSUM
;
11643 dev
->features
|= NETIF_F_IP_CSUM
;
11644 dev
->features
|= NETIF_F_SG
;
11645 tp
->tg3_flags
|= TG3_FLAG_RX_CHECKSUMS
;
11647 tp
->tg3_flags
&= ~TG3_FLAG_RX_CHECKSUMS
;
11649 /* flow control autonegotiation is default behavior */
11650 tp
->tg3_flags
|= TG3_FLAG_PAUSE_AUTONEG
;
11654 /* Now that we have fully setup the chip, save away a snapshot
11655 * of the PCI config space. We need to restore this after
11656 * GRC_MISC_CFG core clock resets and some resume events.
11658 pci_save_state(tp
->pdev
);
11660 err
= register_netdev(dev
);
11662 printk(KERN_ERR PFX
"Cannot register net device, "
11664 goto err_out_iounmap
;
11667 pci_set_drvdata(pdev
, dev
);
11669 printk(KERN_INFO
"%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %sBaseT Ethernet ",
11671 tp
->board_part_number
,
11672 tp
->pci_chip_rev_id
,
11673 tg3_phy_string(tp
),
11674 tg3_bus_string(tp
, str
),
11675 (tp
->tg3_flags
& TG3_FLAG_10_100_ONLY
) ? "10/100" : "10/100/1000");
11677 for (i
= 0; i
< 6; i
++)
11678 printk("%2.2x%c", dev
->dev_addr
[i
],
11679 i
== 5 ? '\n' : ':');
11681 printk(KERN_INFO
"%s: RXcsums[%d] LinkChgREG[%d] "
11682 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
11685 (tp
->tg3_flags
& TG3_FLAG_RX_CHECKSUMS
) != 0,
11686 (tp
->tg3_flags
& TG3_FLAG_USE_LINKCHG_REG
) != 0,
11687 (tp
->tg3_flags
& TG3_FLAG_USE_MI_INTERRUPT
) != 0,
11688 (tp
->tg3_flags
& TG3_FLAG_ENABLE_ASF
) != 0,
11689 (tp
->tg3_flags
& TG3_FLAG_SPLIT_MODE
) != 0,
11690 (tp
->tg3_flags2
& TG3_FLG2_NO_ETH_WIRE_SPEED
) == 0,
11691 (tp
->tg3_flags2
& TG3_FLG2_TSO_CAPABLE
) != 0);
11692 printk(KERN_INFO
"%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11693 dev
->name
, tp
->dma_rwctrl
,
11694 (pdev
->dma_mask
== DMA_32BIT_MASK
) ? 32 :
11695 (((u64
) pdev
->dma_mask
== DMA_40BIT_MASK
) ? 40 : 64));
11697 netif_carrier_off(tp
->dev
);
11711 pci_release_regions(pdev
);
11713 err_out_disable_pdev
:
11714 pci_disable_device(pdev
);
11715 pci_set_drvdata(pdev
, NULL
);
11719 static void __devexit
tg3_remove_one(struct pci_dev
*pdev
)
11721 struct net_device
*dev
= pci_get_drvdata(pdev
);
11724 struct tg3
*tp
= netdev_priv(dev
);
11726 flush_scheduled_work();
11727 unregister_netdev(dev
);
11733 pci_release_regions(pdev
);
11734 pci_disable_device(pdev
);
11735 pci_set_drvdata(pdev
, NULL
);
11739 static int tg3_suspend(struct pci_dev
*pdev
, pm_message_t state
)
11741 struct net_device
*dev
= pci_get_drvdata(pdev
);
11742 struct tg3
*tp
= netdev_priv(dev
);
11745 if (!netif_running(dev
))
11748 flush_scheduled_work();
11749 tg3_netif_stop(tp
);
11751 del_timer_sync(&tp
->timer
);
11753 tg3_full_lock(tp
, 1);
11754 tg3_disable_ints(tp
);
11755 tg3_full_unlock(tp
);
11757 netif_device_detach(dev
);
11759 tg3_full_lock(tp
, 0);
11760 tg3_halt(tp
, RESET_KIND_SHUTDOWN
, 1);
11761 tp
->tg3_flags
&= ~TG3_FLAG_INIT_COMPLETE
;
11762 tg3_full_unlock(tp
);
11764 err
= tg3_set_power_state(tp
, pci_choose_state(pdev
, state
));
11766 tg3_full_lock(tp
, 0);
11768 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
11769 if (tg3_restart_hw(tp
, 1))
11772 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
11773 add_timer(&tp
->timer
);
11775 netif_device_attach(dev
);
11776 tg3_netif_start(tp
);
11779 tg3_full_unlock(tp
);
11785 static int tg3_resume(struct pci_dev
*pdev
)
11787 struct net_device
*dev
= pci_get_drvdata(pdev
);
11788 struct tg3
*tp
= netdev_priv(dev
);
11791 if (!netif_running(dev
))
11794 pci_restore_state(tp
->pdev
);
11796 err
= tg3_set_power_state(tp
, PCI_D0
);
11800 netif_device_attach(dev
);
11802 tg3_full_lock(tp
, 0);
11804 tp
->tg3_flags
|= TG3_FLAG_INIT_COMPLETE
;
11805 err
= tg3_restart_hw(tp
, 1);
11809 tp
->timer
.expires
= jiffies
+ tp
->timer_offset
;
11810 add_timer(&tp
->timer
);
11812 tg3_netif_start(tp
);
11815 tg3_full_unlock(tp
);
11820 static struct pci_driver tg3_driver
= {
11821 .name
= DRV_MODULE_NAME
,
11822 .id_table
= tg3_pci_tbl
,
11823 .probe
= tg3_init_one
,
11824 .remove
= __devexit_p(tg3_remove_one
),
11825 .suspend
= tg3_suspend
,
11826 .resume
= tg3_resume
11829 static int __init
tg3_init(void)
11831 return pci_register_driver(&tg3_driver
);
11834 static void __exit
tg3_cleanup(void)
11836 pci_unregister_driver(&tg3_driver
);
/* Register the module entry and exit points with the loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);