[TG3]: add basic bcm5752 support
[deliverable/linux.git] / drivers / net / tg3.c
CommitLineData
1da177e4
LT
1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005 Broadcom Corporation.
8 *
9 * Firmware is:
10 * Copyright (C) 2000-2003 Broadcom Corporation.
11 */
12
13#include <linux/config.h>
14
15#include <linux/module.h>
16#include <linux/moduleparam.h>
17#include <linux/kernel.h>
18#include <linux/types.h>
19#include <linux/compiler.h>
20#include <linux/slab.h>
21#include <linux/delay.h>
22#include <linux/init.h>
23#include <linux/ioport.h>
24#include <linux/pci.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/ethtool.h>
29#include <linux/mii.h>
30#include <linux/if_vlan.h>
31#include <linux/ip.h>
32#include <linux/tcp.h>
33#include <linux/workqueue.h>
34
35#include <net/checksum.h>
36
37#include <asm/system.h>
38#include <asm/io.h>
39#include <asm/byteorder.h>
40#include <asm/uaccess.h>
41
42#ifdef CONFIG_SPARC64
43#include <asm/idprom.h>
44#include <asm/oplib.h>
45#include <asm/pbm.h>
46#endif
47
48#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49#define TG3_VLAN_TAG_USED 1
50#else
51#define TG3_VLAN_TAG_USED 0
52#endif
53
54#ifdef NETIF_F_TSO
55#define TG3_TSO_SUPPORT 1
56#else
57#define TG3_TSO_SUPPORT 0
58#endif
59
60#include "tg3.h"
61
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.25"
#define DRV_MODULE_RELDATE "March 24, 2005"

/* Default MAC/RX/TX mode register values and netif_msg bitmap used when
 * the tg3_debug module parameter is left at -1.
 */
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
#define TG3_DEF_TX_MODE 0
#define TG3_DEF_MSG_ENABLE \
	(NETIF_MSG_DRV | \
	 NETIF_MSG_PROBE | \
	 NETIF_MSG_LINK | \
	 NETIF_MSG_TIMER | \
	 NETIF_MSG_IFDOWN | \
	 NETIF_MSG_IFUP | \
	 NETIF_MSG_RX_ERR | \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU 60
/* Jumbo frames (9000-byte payload) are allowed on every ASIC except the
 * 5705/5750/5752 family, which is limited to the standard 1500-byte MTU.
 */
#define TG3_MAX_MTU(tp) \
	((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
	  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && \
	  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE 512
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JUMBO_RING_SIZE 256
#define TG3_DEF_RX_JUMBO_RING_PENDING 100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp) \
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)

#define TG3_TX_RING_SIZE 512
#define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the host-visible descriptor rings, used for DMA
 * allocation. */
#define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
			   TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
			   TG3_TX_RING_SIZE)
/* Number of TX slots the driver deliberately leaves unused. */
#define TX_RING_GAP(TP) \
	(TG3_TX_RING_SIZE - (TP)->tx_pending)
/* Free TX descriptors, accounting for producer/consumer wrap-around. */
#define TX_BUFFS_AVAIL(TP) \
	(((TP)->tx_cons <= (TP)->tx_prod) ? \
	  (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod : \
	  (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))

/* skb sizes for standard and jumbo RX buffers; the extra 64 bytes leave
 * room for alignment past tp->rx_offset. */
#define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH (TG3_TX_RING_SIZE / 4)

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
137
/* Banner printed once at probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
149
/* PCI IDs claimed by this driver: all Broadcom Tigon3 variants plus the
 * SysKonnect, Altima and Apple boards built around the same silicon.
 * Every entry matches any subsystem vendor/device.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
	{ 0, }	/* terminator */
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
235
/* ethtool ETHTOOL_GSTRINGS statistic names. The order must match the
 * u64 layout of struct tg3_ethtool_stats (see TG3_NUM_STATS above).
 */
static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	/* receive-side MAC counters */
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	/* transmit-side MAC counters */
	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	/* receive list placement statistics */
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	/* send data initiator statistics */
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	/* host coalescing statistics */
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
316
317static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
318{
319 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
320 unsigned long flags;
321
322 spin_lock_irqsave(&tp->indirect_lock, flags);
323 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
324 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
325 spin_unlock_irqrestore(&tp->indirect_lock, flags);
326 } else {
327 writel(val, tp->regs + off);
328 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
329 readl(tp->regs + off);
330 }
331}
332
333static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
334{
335 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
336 unsigned long flags;
337
338 spin_lock_irqsave(&tp->indirect_lock, flags);
339 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
340 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
341 spin_unlock_irqrestore(&tp->indirect_lock, flags);
342 } else {
343 void __iomem *dest = tp->regs + off;
344 writel(val, dest);
345 readl(dest); /* always flush PCI write */
346 }
347}
348
349static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
350{
351 void __iomem *mbox = tp->regs + off;
352 writel(val, mbox);
353 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
354 readl(mbox);
355}
356
357static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
358{
359 void __iomem *mbox = tp->regs + off;
360 writel(val, mbox);
361 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
362 writel(val, mbox);
363 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
364 readl(mbox);
365}
366
/* Register access shorthands.  All of these implicitly use a local
 * variable named 'tp' in the calling function.  tw32/tw32_f go through
 * the indirect-access helpers above; the 16/8-bit variants and tr* are
 * plain MMIO.
 */
#define tw32_mailbox(reg, val) writel(((val) & 0xffffffff), tp->regs + (reg))
#define tw32_rx_mbox(reg, val) _tw32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val) _tw32_tx_mbox(tp, reg, val)

#define tw32(reg,val) tg3_write_indirect_reg32(tp,(reg),(val))
#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val))
#define tw16(reg,val) writew(((val) & 0xffff), tp->regs + (reg))
#define tw8(reg,val) writeb(((val) & 0xff), tp->regs + (reg))
#define tr32(reg) readl(tp->regs + (reg))
#define tr16(reg) readw(tp->regs + (reg))
#define tr8(reg) readb(tp->regs + (reg))
378
379static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
380{
381 unsigned long flags;
382
383 spin_lock_irqsave(&tp->indirect_lock, flags);
384 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
385 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
386
387 /* Always leave this as zero. */
388 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
389 spin_unlock_irqrestore(&tp->indirect_lock, flags);
390}
391
392static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
393{
394 unsigned long flags;
395
396 spin_lock_irqsave(&tp->indirect_lock, flags);
397 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
398 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
399
400 /* Always leave this as zero. */
401 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
402 spin_unlock_irqrestore(&tp->indirect_lock, flags);
403}
404
/* Mask chip interrupts: block the PCI INT line via MISC_HOST_CTRL,
 * then write 1 to the interrupt mailbox and read it back to flush the
 * posted write.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);	/* flush */
}
412
/* If the status block already has a pending update, force an interrupt
 * via GRC local control so the event is not lost after re-enabling
 * interrupts.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (tp->hw_status->status & SD_STATUS_UPDATED)
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
}
418
/* Unmask chip interrupts (inverse of tg3_disable_ints): clear the PCI
 * INT mask, write 0 to the interrupt mailbox, flush with a read, then
 * fire a self-interrupt if a status update is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
	tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);	/* flush */

	tg3_cond_int(tp);
}
428
/* tg3_restart_ints
 * similar to tg3_enable_ints, but it can return without flushing the
 * PIO write which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
	/* Order the mailbox write against later MMIO without paying for
	 * a full read-back flush. */
	mmiowb();

	tg3_cond_int(tp);
}
442
/* Quiesce the network stack's view of this device: stop NAPI polling
 * first, then stop the transmit queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
448
/* Re-enable TX and NAPI polling after a reset or reconfiguration. */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	/* Catch any status-block update that arrived while stopped. */
	tg3_cond_int(tp);
}
459
/* Switch the chip core clock to its normal source.  The intermediate
 * register writes and 40us settling delays follow the hardware's
 * required sequencing, so the statement order here must not change.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN control bits and the low 5 bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		/* 5705+ parts: drop out of 625 MHz core clock if it was
		 * selected. */
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_f(TG3PCI_CLOCK_CTRL,
			       clock_ctrl | CLOCK_CTRL_625_CORE);
			udelay(40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Older parts: step from 44 MHz core to ALTCLK in two
		 * writes, 40us apart. */
		tw32_f(TG3PCI_CLOCK_CTRL,
		       clock_ctrl |
		       (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
		udelay(40);
		tw32_f(TG3PCI_CLOCK_CTRL,
		       clock_ctrl | (CLOCK_CTRL_ALTCLK));
		udelay(40);
	}
	tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
	udelay(40);
}
489
/* Maximum iterations (~10us each) to wait for an MI transaction. */
#define PHY_BUSY_LOOPS 5000

/* Read PHY register 'reg' over the MI (MDIO) interface into *val.
 * Returns 0 on success, -EBUSY if the transaction never completed.
 * *val is cleared first, so it is 0 on failure.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Hardware auto-polling and manual MI access cannot coexist;
	 * pause auto-polling for the duration of the transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, READ command. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears; re-read once more after a
	 * short settle to pick up the data bits. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
540
/* Write 'val' to PHY register 'reg' over the MI (MDIO) interface.
 * Returns 0 on success, -EBUSY if the transaction never completed.
 * Mirror image of tg3_readphy().
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Pause hardware auto-polling during the manual transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, WRITE cmd. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll until the BUSY bit clears. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
585
586static void tg3_phy_set_wirespeed(struct tg3 *tp)
587{
588 u32 val;
589
590 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
591 return;
592
593 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
594 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
595 tg3_writephy(tp, MII_TG3_AUX_CTRL,
596 (val | (1 << 15) | (1 << 4)));
597}
598
599static int tg3_bmcr_reset(struct tg3 *tp)
600{
601 u32 phy_control;
602 int limit, err;
603
604 /* OK, reset it, and poll the BMCR_RESET bit until it
605 * clears or we time out.
606 */
607 phy_control = BMCR_RESET;
608 err = tg3_writephy(tp, MII_BMCR, phy_control);
609 if (err != 0)
610 return -EBUSY;
611
612 limit = 5000;
613 while (limit--) {
614 err = tg3_readphy(tp, MII_BMCR, &phy_control);
615 if (err != 0)
616 return -EBUSY;
617
618 if ((phy_control & BMCR_RESET) == 0) {
619 udelay(40);
620 break;
621 }
622 udelay(10);
623 }
624 if (limit <= 0)
625 return -EBUSY;
626
627 return 0;
628}
629
630static int tg3_wait_macro_done(struct tg3 *tp)
631{
632 int limit = 100;
633
634 while (limit--) {
635 u32 tmp32;
636
637 if (!tg3_readphy(tp, 0x16, &tmp32)) {
638 if ((tmp32 & 0x1000) == 0)
639 break;
640 }
641 }
642 if (limit <= 0)
643 return -EBUSY;
644
645 return 0;
646}
647
/* Write a fixed test pattern into each of the four PHY DSP channels,
 * read it back, and verify it.  On any mismatch or macro timeout,
 * *resetp is set so the caller knows a fresh PHY reset is required.
 * Returns 0 when all four channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the writes, then wait for the macro to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back each (low, high) pair and compare against
		 * the pattern; only 15/4 significant bits respectively. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the DSP back to a sane
				 * state before bailing out. */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
713
714static int tg3_phy_reset_chanpat(struct tg3 *tp)
715{
716 int chan;
717
718 for (chan = 0; chan < 4; chan++) {
719 int i;
720
721 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
722 (chan * 0x2000) | 0x0200);
723 tg3_writephy(tp, 0x16, 0x0002);
724 for (i = 0; i < 6; i++)
725 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
726 tg3_writephy(tp, 0x16, 0x0202);
727 if (tg3_wait_macro_done(tp))
728 return -EBUSY;
729 }
730
731 return 0;
732}
733
/* PHY reset workaround for 5703/5704/5705: reset the PHY, force
 * 1000/full master mode, and loop writing/verifying a DSP test pattern
 * (up to 10 attempts) until it checks out, then restore normal
 * operation.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): if every attempt takes one of the `continue` paths,
 * phy9_orig is used uninitialized in the restore below and err holds
 * a stale value — presumably the MI reads are assumed to eventually
 * succeed; confirm against tg3_readphy failure modes.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test pattern back out of the DSP channels. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access. */
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	}
	else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	/* Restore the master/slave setting saved above. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
809
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 *
 * Resets the PHY (via the chip-specific workaround path on
 * 5703/5704/5705), then applies per-PHY errata fixups and jumbo-frame
 * related settings.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	/* BMSR is latched; read twice to get the current link state. */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* DSP tap writes working around the PHY ADC erratum. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* 5704 A0 erratum workaround (written twice intentionally). */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	/* DSP tap writes working around the bit-error-rate erratum. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
891
/* Drive the GRC local-control GPIOs that switch the NIC's auxiliary
 * power, depending on whether Wake-on-LAN is enabled.  On 5704
 * dual-port boards both functions share the GPIOs, so the peer
 * device's state is consulted and only one function does the frobbing.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	/* Nothing to do when the EEPROM write-protect strap is set. */
	if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		tp_peer = pci_get_drvdata(tp->pdev_peer);
		if (!tp_peer)
			BUG();	/* peer must have been probed first */
	}


	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
		/* WOL on: make sure aux power stays up. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			     (GRC_LCLCTRL_GPIO_OE0 |
			      GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OE2 |
			      GRC_LCLCTRL_GPIO_OUTPUT0 |
			      GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl;

			/* Let the other port handle it if it is already
			 * fully initialized. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step GPIO sequence, 100us between steps. */
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						grc_local_ctrl);
			udelay(100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						grc_local_ctrl);
			udelay(100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						       grc_local_ctrl);
				udelay(100);
			}
		}
	} else {
		/* WOL off: aux power can be dropped (not on 5700/5701). */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1: assert, release, assert. */
			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			     (GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			     (GRC_LCLCTRL_GPIO_OE1));
			udelay(100);

			tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			     (GRC_LCLCTRL_GPIO_OE1 |
			      GRC_LCLCTRL_GPIO_OUTPUT1));
			udelay(100);
		}
	}
}
978
/* Forward declarations for helpers defined later in this file. */
static int tg3_setup_phy(struct tg3 *, int);

/* Reset "kind" codes passed to the firmware signature helpers. */
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
#define RESET_KIND_SUSPEND 2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
987
988static int tg3_set_power_state(struct tg3 *tp, int state)
989{
990 u32 misc_host_ctrl;
991 u16 power_control, power_caps;
992 int pm = tp->pm_cap;
993
994 /* Make sure register accesses (indirect or otherwise)
995 * will function correctly.
996 */
997 pci_write_config_dword(tp->pdev,
998 TG3PCI_MISC_HOST_CTRL,
999 tp->misc_host_ctrl);
1000
1001 pci_read_config_word(tp->pdev,
1002 pm + PCI_PM_CTRL,
1003 &power_control);
1004 power_control |= PCI_PM_CTRL_PME_STATUS;
1005 power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1006 switch (state) {
1007 case 0:
1008 power_control |= 0;
1009 pci_write_config_word(tp->pdev,
1010 pm + PCI_PM_CTRL,
1011 power_control);
1012 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1013 udelay(100);
1014
1015 return 0;
1016
1017 case 1:
1018 power_control |= 1;
1019 break;
1020
1021 case 2:
1022 power_control |= 2;
1023 break;
1024
1025 case 3:
1026 power_control |= 3;
1027 break;
1028
1029 default:
1030 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1031 "requested.\n",
1032 tp->dev->name, state);
1033 return -EINVAL;
1034 };
1035
1036 power_control |= PCI_PM_CTRL_PME_ENABLE;
1037
1038 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1039 tw32(TG3PCI_MISC_HOST_CTRL,
1040 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1041
1042 if (tp->link_config.phy_is_low_power == 0) {
1043 tp->link_config.phy_is_low_power = 1;
1044 tp->link_config.orig_speed = tp->link_config.speed;
1045 tp->link_config.orig_duplex = tp->link_config.duplex;
1046 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1047 }
1048
1049 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1050 tp->link_config.speed = SPEED_10;
1051 tp->link_config.duplex = DUPLEX_HALF;
1052 tp->link_config.autoneg = AUTONEG_ENABLE;
1053 tg3_setup_phy(tp, 0);
1054 }
1055
1056 pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1057
1058 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1059 u32 mac_mode;
1060
1061 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1062 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1063 udelay(40);
1064
1065 mac_mode = MAC_MODE_PORT_MODE_MII;
1066
1067 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1068 !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1069 mac_mode |= MAC_MODE_LINK_POLARITY;
1070 } else {
1071 mac_mode = MAC_MODE_PORT_MODE_TBI;
1072 }
1073
2052da94
JL
1074 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
1075 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752)
1076 tw32(MAC_LED_CTRL, tp->led_ctrl);
1077
1078 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1079 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1080 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1081
1082 tw32_f(MAC_MODE, mac_mode);
1083 udelay(100);
1084
1085 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1086 udelay(10);
1087 }
1088
1089 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1090 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1092 u32 base_val;
1093
1094 base_val = tp->pci_clock_ctrl;
1095 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1096 CLOCK_CTRL_TXCLK_DISABLE);
1097
1098 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1099 CLOCK_CTRL_ALTCLK |
1100 CLOCK_CTRL_PWRDOWN_PLL133);
1101 udelay(40);
1102 } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == 5750) &&
1103 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1104 u32 newbits1, newbits2;
1105
1106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1107 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1108 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1109 CLOCK_CTRL_TXCLK_DISABLE |
1110 CLOCK_CTRL_ALTCLK);
1111 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1112 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1113 newbits1 = CLOCK_CTRL_625_CORE;
1114 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1115 } else {
1116 newbits1 = CLOCK_CTRL_ALTCLK;
1117 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1118 }
1119
1120 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1121 udelay(40);
1122
1123 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1124 udelay(40);
1125
1126 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1127 u32 newbits3;
1128
1129 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1130 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1131 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1132 CLOCK_CTRL_TXCLK_DISABLE |
1133 CLOCK_CTRL_44MHZ_CORE);
1134 } else {
1135 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1136 }
1137
1138 tw32_f(TG3PCI_CLOCK_CTRL,
1139 tp->pci_clock_ctrl | newbits3);
1140 udelay(40);
1141 }
1142 }
1143
1144 tg3_frob_aux_power(tp);
1145
1146 /* Workaround for unstable PLL clock */
1147 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1148 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1149 u32 val = tr32(0x7d00);
1150
1151 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1152 tw32(0x7d00, val);
1153 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1154 tg3_halt_cpu(tp, RX_CPU_BASE);
1155 }
1156
1157 /* Finally, set the new power state. */
1158 pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1159
1160 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1161
1162 return 0;
1163}
1164
1165static void tg3_link_report(struct tg3 *tp)
1166{
1167 if (!netif_carrier_ok(tp->dev)) {
1168 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1169 } else {
1170 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1171 tp->dev->name,
1172 (tp->link_config.active_speed == SPEED_1000 ?
1173 1000 :
1174 (tp->link_config.active_speed == SPEED_100 ?
1175 100 : 10)),
1176 (tp->link_config.active_duplex == DUPLEX_FULL ?
1177 "full" : "half"));
1178
1179 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1180 "%s for RX.\n",
1181 tp->dev->name,
1182 (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1183 (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1184 }
1185}
1186
1187static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1188{
1189 u32 new_tg3_flags = 0;
1190 u32 old_rx_mode = tp->rx_mode;
1191 u32 old_tx_mode = tp->tx_mode;
1192
1193 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1194 if (local_adv & ADVERTISE_PAUSE_CAP) {
1195 if (local_adv & ADVERTISE_PAUSE_ASYM) {
1196 if (remote_adv & LPA_PAUSE_CAP)
1197 new_tg3_flags |=
1198 (TG3_FLAG_RX_PAUSE |
1199 TG3_FLAG_TX_PAUSE);
1200 else if (remote_adv & LPA_PAUSE_ASYM)
1201 new_tg3_flags |=
1202 (TG3_FLAG_RX_PAUSE);
1203 } else {
1204 if (remote_adv & LPA_PAUSE_CAP)
1205 new_tg3_flags |=
1206 (TG3_FLAG_RX_PAUSE |
1207 TG3_FLAG_TX_PAUSE);
1208 }
1209 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1210 if ((remote_adv & LPA_PAUSE_CAP) &&
1211 (remote_adv & LPA_PAUSE_ASYM))
1212 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1213 }
1214
1215 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1216 tp->tg3_flags |= new_tg3_flags;
1217 } else {
1218 new_tg3_flags = tp->tg3_flags;
1219 }
1220
1221 if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1222 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1223 else
1224 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1225
1226 if (old_rx_mode != tp->rx_mode) {
1227 tw32_f(MAC_RX_MODE, tp->rx_mode);
1228 }
1229
1230 if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1231 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1232 else
1233 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1234
1235 if (old_tx_mode != tp->tx_mode) {
1236 tw32_f(MAC_TX_MODE, tp->tx_mode);
1237 }
1238}
1239
1240static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1241{
1242 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1243 case MII_TG3_AUX_STAT_10HALF:
1244 *speed = SPEED_10;
1245 *duplex = DUPLEX_HALF;
1246 break;
1247
1248 case MII_TG3_AUX_STAT_10FULL:
1249 *speed = SPEED_10;
1250 *duplex = DUPLEX_FULL;
1251 break;
1252
1253 case MII_TG3_AUX_STAT_100HALF:
1254 *speed = SPEED_100;
1255 *duplex = DUPLEX_HALF;
1256 break;
1257
1258 case MII_TG3_AUX_STAT_100FULL:
1259 *speed = SPEED_100;
1260 *duplex = DUPLEX_FULL;
1261 break;
1262
1263 case MII_TG3_AUX_STAT_1000HALF:
1264 *speed = SPEED_1000;
1265 *duplex = DUPLEX_HALF;
1266 break;
1267
1268 case MII_TG3_AUX_STAT_1000FULL:
1269 *speed = SPEED_1000;
1270 *duplex = DUPLEX_FULL;
1271 break;
1272
1273 default:
1274 *speed = SPEED_INVALID;
1275 *duplex = DUPLEX_INVALID;
1276 break;
1277 };
1278}
1279
/* Program the copper PHY advertisement registers from tp->link_config
 * and either restart autonegotiation or force a fixed speed/duplex.
 * Three advertisement cases: low-power (10Mb, optionally 100Mb for WOL),
 * SPEED_INVALID (advertise everything the chip supports), or a single
 * user-requested mode.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		/* Keep 100Mb advertised only if WOL needs 100Mb links. */
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise all modes. */
		tp->link_config.advertising =
			(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg | ADVERTISED_MII);

		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 silicon needs forced-master at 1000Mb. */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode workaround as above. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		/* Forced mode: program BMCR directly, no autoneg restart. */
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop into loopback and wait for the old link to
			 * go down (up to ~15ms) before applying new BMCR.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1418
1419static int tg3_init_5401phy_dsp(struct tg3 *tp)
1420{
1421 int err;
1422
1423 /* Turn off tap power management. */
1424 /* Set Extended packet length bit */
1425 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1426
1427 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1428 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1429
1430 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1431 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1432
1433 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1434 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1435
1436 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1437 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1438
1439 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1440 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1441
1442 udelay(40);
1443
1444 return err;
1445}
1446
1447static int tg3_copper_is_advertising_all(struct tg3 *tp)
1448{
1449 u32 adv_reg, all_mask;
1450
1451 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1452 return 0;
1453
1454 all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1455 ADVERTISE_100HALF | ADVERTISE_100FULL);
1456 if ((adv_reg & all_mask) != all_mask)
1457 return 0;
1458 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1459 u32 tg3_ctrl;
1460
1461 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1462 return 0;
1463
1464 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1465 MII_TG3_CTRL_ADV_1000_FULL);
1466 if ((tg3_ctrl & all_mask) != all_mask)
1467 return 0;
1468 }
1469 return 1;
1470}
1471
/* Establish/refresh the copper-PHY link and program the MAC to match.
 * Determines speed/duplex from BMSR/AUX_STAT, resolves flow control
 * when autoneg completed, restarts PHY bring-up if the link is down,
 * and reports carrier transitions via netif_carrier_{on,off}() plus
 * tg3_link_report().  Returns 0, or a negative error from 5401 PHY
 * DSP init / PHY reset.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR latches link-down; read twice for current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Poll up to ~10ms for the link to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 sometimes needs a full reset +
			 * DSP re-init when coming from a gigabit link.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			/* Bit just enabled; treat link as down for now. */
			goto relink;
		}
	}

	/* Poll for link-up (~4ms max). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for AUX_STAT to report a non-zero speed code. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry BMCR reads; 0x7fff looks like a bus glitch. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			/* Forced mode: link counts only if it matches the
			 * requested speed/duplex with autoneg off.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0) {
		u32 tmp;

		/* Reprogram advertisements and restart autoneg, then
		 * re-sample link status in case it came up immediately.
		 */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Mirror the negotiated link parameters into the MAC mode reg. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / fast PCI: notify firmware of the
	 * link change after re-acking status bits.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
1750
/* State block for the software-driven fiber autonegotiation state
 * machine (tg3_fiber_aneg_smachine), which arbitrates IEEE 802.3
 * clause 37 autoneg by repeatedly sampling the MAC's RX config word.
 */
struct tg3_fiber_aneginfo {
	int state;	/* current arbitration state, one of ANEG_STATE_* */
#define ANEG_STATE_UNKNOWN 0
#define ANEG_STATE_AN_ENABLE 1
#define ANEG_STATE_RESTART_INIT 2
#define ANEG_STATE_RESTART 3
#define ANEG_STATE_DISABLE_LINK_OK 4
#define ANEG_STATE_ABILITY_DETECT_INIT 5
#define ANEG_STATE_ABILITY_DETECT 6
#define ANEG_STATE_ACK_DETECT_INIT 7
#define ANEG_STATE_ACK_DETECT 8
#define ANEG_STATE_COMPLETE_ACK_INIT 9
#define ANEG_STATE_COMPLETE_ACK 10
#define ANEG_STATE_IDLE_DETECT_INIT 11
#define ANEG_STATE_IDLE_DETECT 12
#define ANEG_STATE_LINK_OK 13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
#define ANEG_STATE_NEXT_PAGE_WAIT 15

	u32 flags;	/* MR_* control/status bits, set as negotiation proceeds */
#define MR_AN_ENABLE 0x00000001
#define MR_RESTART_AN 0x00000002
#define MR_AN_COMPLETE 0x00000004
#define MR_PAGE_RX 0x00000008
#define MR_NP_LOADED 0x00000010
#define MR_TOGGLE_TX 0x00000020
	/* MR_LP_ADV_* mirror the link partner's advertised abilities. */
#define MR_LP_ADV_FULL_DUPLEX 0x00000040
#define MR_LP_ADV_HALF_DUPLEX 0x00000080
#define MR_LP_ADV_SYM_PAUSE 0x00000100
#define MR_LP_ADV_ASYM_PAUSE 0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE 0x00001000
#define MR_TOGGLE_RX 0x00002000
#define MR_NP_RX 0x00004000

#define MR_LINK_OK 0x80000000

	/* Tick counters (state-machine iterations) used for settle timing. */
	unsigned long link_time, cur_time;

	/* Last sampled RX config word and how many consecutive times it
	 * repeated; ability_match is set once it has been seen twice.
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	char ability_match, idle_match, ack_match;

	/* Raw clause-37 config words we transmit / last received. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP 0x00000080
#define ANEG_CFG_ACK 0x00000040
#define ANEG_CFG_RF2 0x00000020
#define ANEG_CFG_RF1 0x00000010
#define ANEG_CFG_PS2 0x00000001
#define ANEG_CFG_PS1 0x00008000
#define ANEG_CFG_HD 0x00004000
#define ANEG_CFG_FD 0x00002000
#define ANEG_CFG_INVAL 0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK 0
#define ANEG_DONE 1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED -1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME 10000
1814
/* Single step of the software fiber (clause 37) autonegotiation state
 * machine.  Samples the received config word from MAC_RX_AUTO_NEG,
 * updates ability/ack/idle match tracking, then advances ap->state.
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB when a settle
 * timer should run, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First invocation: zero all tracking state. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* New config word: restart the repeat counter. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* Same word twice in a row => ability match. */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words received: line is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send a break-link (all-zero config) and let it settle. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's config with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the link partner's advertised abilities from the
		 * received config word into the MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Partner wants next-page exchange, which
				 * we only tolerate when neither side has
				 * a next page to send.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; wait for the line to idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2062
2063static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2064{
2065 int res = 0;
2066 struct tg3_fiber_aneginfo aninfo;
2067 int status = ANEG_FAILED;
2068 unsigned int tick;
2069 u32 tmp;
2070
2071 tw32_f(MAC_TX_AUTO_NEG, 0);
2072
2073 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2074 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2075 udelay(40);
2076
2077 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2078 udelay(40);
2079
2080 memset(&aninfo, 0, sizeof(aninfo));
2081 aninfo.flags |= MR_AN_ENABLE;
2082 aninfo.state = ANEG_STATE_UNKNOWN;
2083 aninfo.cur_time = 0;
2084 tick = 0;
2085 while (++tick < 195000) {
2086 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2087 if (status == ANEG_DONE || status == ANEG_FAILED)
2088 break;
2089
2090 udelay(1);
2091 }
2092
2093 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2094 tw32_f(MAC_MODE, tp->mac_mode);
2095 udelay(40);
2096
2097 *flags = aninfo.flags;
2098
2099 if (status == ANEG_DONE &&
2100 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2101 MR_LP_ADV_FULL_DUPLEX)))
2102 res = 1;
2103
2104 return res;
2105}
2106
/* Bring up the external BCM8002 SerDes PHY.  The register writes and
 * busy-wait delays follow a fixed vendor sequence; statement order is
 * significant.  Skipped when init already completed and the PCS has
 * no sync (i.e. no reason to disturb the PHY).
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2156
/* Drive fiber link bring-up through the hardware SG-DIG autoneg block.
 * Handles both forced mode (autoneg disabled) and hardware autoneg,
 * including the 5704 serdes-cfg workaround and parallel detection when
 * the partner sends no config words.  Returns 1 if the link is up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All chips except 5704 A0/A1 need the SERDES_CFG workaround. */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it is on. */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				/* Per-port serdes config value. */
				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation. */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* (Re)start hardware autoneg with a soft-reset pulse. */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		int i;

		/* Giver time to negotiate (~200ms) */
		for (i = 0; i < 40000; i++) {
			sg_dig_status = tr32(SG_DIG_STATUS);
			if (sg_dig_status & (0x3))
				break;
			udelay(5);
		}
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg completed: extract partner pause bits. */
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg did not complete.  Give a freshly
			 * initialized PHY one more pass before falling
			 * back to parallel detection.
			 */
			if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
				tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
				}
			}
		}
	}

out:
	return current_link_up;
}
2281
/* Drive fiber link setup in software (no SG-DIG hardware autoneg).
 * Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal at all. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
		tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
		goto out;
	}

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
			current_link_up = 1;
		}
		/* Ack SYNC/CFG change events until they stop arriving
		 * (up to 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Parallel detection: PCS sync with no config code
		 * words counts as link even if autoneg failed.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;
		tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2339
/* (Re)establish link on a fiber (TBI) port, then update link state,
 * LEDs, and netif carrier.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current settings so only real changes get reported. */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: carrier is on and PCS is synced with nothing
	 * changed — just ack the change bits and leave.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI (fiber) port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling. */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear any stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack SYNC/CFG change events until they stop arriving. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Nudge the peer by briefly sending config words. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always reported as 1000 full duplex. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged; still report pause/speed/duplex moves. */
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2453
/* Top-level PHY setup: dispatch to the fiber or copper code, then
 * adjust MAC TX timings and statistics coalescing for the resulting
 * link state.  Returns the fiber/copper helper's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	/* 1000 half-duplex gets a larger slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     DEFAULT_STAT_COAL_TICKS);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	return err;
}
2487
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 *
 * Reclaim completed TX descriptors: unmap the DMA buffers, free the
 * skbs, and wake the queue when enough room has opened up.  Called
 * from tg3_poll() under tp->tx_lock.
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i;

		/* Only head entries own an skb; a NULL here means the
		 * ring bookkeeping is corrupt.
		 */
		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Page fragments occupy the descriptors that follow. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			if (unlikely(sw_idx == hw_idx))
				BUG();

			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL))
				BUG();

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb_irq(skb);
	}

	tp->tx_cons = sw_idx;

	if (netif_queue_stopped(tp->dev) &&
	    (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
		netif_wake_queue(tp->dev);
}
2539
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Pick the target ring (std or jumbo) from the opaque key. */
	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = RX_PKT_BUF_SZ;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = dev_alloc_skb(skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb->dev = tp->dev;
	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	/* The old slot (if any) no longer owns its skb. */
	if (src_map != NULL)
		src_map->skb = NULL;

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
2612
2613/* We only need to move over in the address because the other
2614 * members of the RX descriptor are invariant. See notes above
2615 * tg3_alloc_rx_skb for full details.
2616 */
2617static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2618 int src_idx, u32 dest_idx_unmasked)
2619{
2620 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2621 struct ring_info *src_map, *dest_map;
2622 int dest_idx;
2623
2624 switch (opaque_key) {
2625 case RXD_OPAQUE_RING_STD:
2626 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2627 dest_desc = &tp->rx_std[dest_idx];
2628 dest_map = &tp->rx_std_buffers[dest_idx];
2629 src_desc = &tp->rx_std[src_idx];
2630 src_map = &tp->rx_std_buffers[src_idx];
2631 break;
2632
2633 case RXD_OPAQUE_RING_JUMBO:
2634 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2635 dest_desc = &tp->rx_jumbo[dest_idx];
2636 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2637 src_desc = &tp->rx_jumbo[src_idx];
2638 src_map = &tp->rx_jumbo_buffers[src_idx];
2639 break;
2640
2641 default:
2642 return;
2643 };
2644
2645 dest_map->skb = src_map->skb;
2646 pci_unmap_addr_set(dest_map, mapping,
2647 pci_unmap_addr(src_map, mapping));
2648 dest_desc->addr_hi = src_desc->addr_hi;
2649 dest_desc->addr_lo = src_desc->addr_lo;
2650
2651 src_map->skb = NULL;
2652}
2653
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to the stack via the VLAN accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
2660
2661/* The RX ring scheme is composed of multiple rings which post fresh
2662 * buffers to the chip, and one special ring the chip uses to report
2663 * status back to the host.
2664 *
2665 * The special ring reports the status of received packets to the
2666 * host. The chip does not write into the original descriptor the
2667 * RX buffer was obtained from. The chip simply takes the original
2668 * descriptor as provided by the host, updates the status and length
2669 * field, then writes this into the next status ring entry.
2670 *
2671 * Each ring the host uses to post buffers to the chip is described
2672 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
2673 * it is first placed into the on-chip ram. When the packet's length
2674 * is known, it walks down the TG3_BDINFO entries to select the ring.
2675 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2676 * which is within the range of the new packet's length is chosen.
2677 *
2678 * The "separate ring for rx status" scheme may sound queer, but it makes
2679 * sense from a cache coherency perspective. If only the host writes
2680 * to the buffer post rings, and only the chip writes to the rx status
2681 * rings, then cache lines never move beyond shared-modified state.
2682 * If both the host and chip were to write into the same ring, cache line
2683 * eviction could occur since both entities want it in an exclusive state.
2684 */
/* Process up to @budget packets from the RX status ring; returns the
 * number actually received.  See the ring-scheme comment above.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask;
	u32 rx_rcb_ptr = tp->rx_rcb_ptr;
	u16 hw_idx, sw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
	work_mask = 0;
	received = 0;
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the producer ring and
		 * slot this buffer was originally posted on.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			/* Large packet: post a fresh buffer and hand the
			 * original skb up the stack without copying.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small packet: copy it into a fresh skb and
			 * recycle the original ring buffer.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = tp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(copy_skb->data, skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* A full 0xffff hardware checksum means it verified. */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;
next_pkt_nopost:
		rx_rcb_ptr++;
		sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = rx_rcb_ptr;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
		     (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
2827
/* NAPI poll handler: ack PHY events, reclaim TX completions, then
 * process up to *budget RX packets.  Returns 0 (and re-enables chip
 * interrupts) when all work is done, 1 when more work remains.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	int done;

	spin_lock_irqsave(&tp->lock, flags);

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit in the status block. */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			tg3_setup_phy(tp, 0);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		spin_lock(&tp->tx_lock);
		tg3_tx(tp);
		spin_unlock(&tp->tx_lock);
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	done = 1;
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;

		/* Exhausting the budget means there may be more work. */
		if (work_done >= orig_budget)
			done = 0;
	}

	/* if no more work, tell net stack and NIC we're done */
	if (done) {
		spin_lock_irqsave(&tp->lock, flags);
		__netif_rx_complete(netdev);
		tg3_restart_ints(tp);
		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return (done ? 0 : 1);
}
2888
2889static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2890{
2891 struct tg3_hw_status *sblk = tp->hw_status;
2892 unsigned int work_exists = 0;
2893
2894 /* check for phy events */
2895 if (!(tp->tg3_flags &
2896 (TG3_FLAG_USE_LINKCHG_REG |
2897 TG3_FLAG_POLL_SERDES))) {
2898 if (sblk->status & SD_STATUS_LINK_CHG)
2899 work_exists = 1;
2900 }
2901 /* check for RX/TX work to do */
2902 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2903 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2904 work_exists = 1;
2905
2906 return work_exists;
2907}
2908
/* Hardware interrupt handler.  Masks further chip interrupts via the
 * interrupt mailbox and schedules the NAPI poll; returns IRQ_NONE when
 * a shared interrupt was not ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned long flags;
	unsigned int handled = 1;

	spin_lock_irqsave(&tp->lock, flags);

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		/*
		 * writing any value to intr-mbox-0 clears PCI INTA# and
		 * chip-internal interrupt pending events.
		 * writing non-zero to intr-mbox-0 additional tells the
		 * NIC to stop sending us irqs, engaging "in-intr-handler"
		 * event coalescing.
		 */
		tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			     0x00000001);
		/*
		 * Flush PCI write. This also guarantees that our
		 * status block has been flushed to host memory.
		 */
		tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(tg3_has_work(dev, tp)))
			netif_rx_schedule(dev);		/* schedule NAPI poll */
		else {
			/* no work, shared interrupt perhaps?  re-enable
			 * interrupts, and flush that PCI write
			 */
			tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
				     0x00000000);
			tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
		}
	} else {	/* shared interrupt */
		handled = 0;
	}

	spin_unlock_irqrestore(&tp->lock, flags);

	return IRQ_RETVAL(handled);
}
2960
2961static int tg3_init_hw(struct tg3 *);
2962static int tg3_halt(struct tg3 *);
2963
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Invoke the interrupt handler by hand (netpoll path). */
static void tg3_poll_controller(struct net_device *dev)
{
	tg3_interrupt(dev->irq, dev, NULL);
}
#endif
2970
/* Work-queue handler: halt and fully re-initialize the chip in process
 * context.  Scheduled from tg3_tx_timeout().
 */
static void tg3_reset_task(void *_data)
{
	struct tg3 *tp = _data;
	unsigned int restart_timer;

	tg3_netif_stop(tp);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	/* Latch and clear the restart request before re-initializing. */
	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	tg3_halt(tp);
	tg3_init_hw(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);
}
2995
/* netdev watchdog hook: log the TX timeout and schedule a chip reset
 * in process context (tg3_reset_task).
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
	       dev->name);

	schedule_work(&tp->reset_task);
}
3005
3006static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3007
3008static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3009 u32 guilty_entry, int guilty_len,
3010 u32 last_plus_one, u32 *start, u32 mss)
3011{
3012 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3013 dma_addr_t new_addr;
3014 u32 entry = *start;
3015 int i;
3016
3017 if (!new_skb) {
3018 dev_kfree_skb(skb);
3019 return -1;
3020 }
3021
3022 /* New SKB is guaranteed to be linear. */
3023 entry = *start;
3024 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3025 PCI_DMA_TODEVICE);
3026 tg3_set_txd(tp, entry, new_addr, new_skb->len,
3027 (skb->ip_summed == CHECKSUM_HW) ?
3028 TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3029 *start = NEXT_TX(entry);
3030
3031 /* Now clean up the sw ring entries. */
3032 i = 0;
3033 while (entry != last_plus_one) {
3034 int len;
3035
3036 if (i == 0)
3037 len = skb_headlen(skb);
3038 else
3039 len = skb_shinfo(skb)->frags[i-1].size;
3040 pci_unmap_single(tp->pdev,
3041 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3042 len, PCI_DMA_TODEVICE);
3043 if (i == 0) {
3044 tp->tx_buffers[entry].skb = new_skb;
3045 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3046 } else {
3047 tp->tx_buffers[entry].skb = NULL;
3048 }
3049 entry = NEXT_TX(entry);
3050 i++;
3051 }
3052
3053 dev_kfree_skb(skb);
3054
3055 return 0;
3056}
3057
3058static void tg3_set_txd(struct tg3 *tp, int entry,
3059 dma_addr_t mapping, int len, u32 flags,
3060 u32 mss_and_is_end)
3061{
3062 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3063 int is_end = (mss_and_is_end & 0x1);
3064 u32 mss = (mss_and_is_end >> 1);
3065 u32 vlan_tag = 0;
3066
3067 if (is_end)
3068 flags |= TXD_FLAG_END;
3069 if (flags & TXD_FLAG_VLAN) {
3070 vlan_tag = flags >> 16;
3071 flags &= 0xffff;
3072 }
3073 vlan_tag |= (mss << TXD_MSS_SHIFT);
3074
3075 txd->addr_hi = ((u64) mapping >> 32);
3076 txd->addr_lo = ((u64) mapping & 0xffffffff);
3077 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3078 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3079}
3080
3081static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3082{
3083 u32 base = (u32) mapping & 0xffffffff;
3084
3085 return ((base > 0xffffdcc0) &&
3086 (base + len + 8 < base));
3087}
3088
/* netdev hard_start_xmit hook: map the skb (linear head plus page
 * frags) into TX descriptors and kick the chip.  Handles TSO, VLAN
 * tags, and the 4GB-boundary DMA hardware bug.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	unsigned int i;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;
	unsigned long flags;

	len = skb_headlen(skb);

	/* No BH disabling for tx_lock here. We are running in BH disabled
	 * context and TX reclaim runs via tp->poll inside of a software
	 * interrupt. Rejoice!
	 *
	 * Actually, things are not so simple. If we are to take a hw
	 * IRQ here, we can deadlock, consider:
	 *
	 * CPU1 CPU2
	 * tg3_start_xmit
	 * take tp->tx_lock
	 * tg3_timer
	 * take tp->lock
	 * tg3_interrupt
	 * spin on tp->lock
	 * spin on tp->tx_lock
	 *
	 * So we really do need to disable interrupts when taking
	 * tx_lock here.
	 */
	local_irq_save(flags);
	if (!spin_trylock(&tp->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&tp->tx_lock, flags);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_HW)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_TSO_SUPPORT != 0
	mss = 0;
	if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
	    (mss = skb_shinfo(skb)->tso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: we must be able to modify the headers. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		/* Zero the IP checksum; the chip regenerates it per
		 * segment.
		 */
		skb->nh.iph->check = 0;
		skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			skb->h.th->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		}
		else {
			/* Seed the TCP pseudo-header checksum for the chip. */
			skb->h.th->check =
				~csum_tcpudp_magic(skb->nh.iph->saddr,
						   skb->nh.iph->daddr,
						   0, IPPROTO_TCP, 0);
		}

		/* Encode IP/TCP option lengths; the field used differs
		 * between HW-TSO/5705 and the older firmware TSO.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || skb->nh.iph->ihl > 5) {
				int tsflags;

				tsflags = ((skb->nh.iph->ihl - 5) +
					   (tcp_opt_len >> 2));
				base_flags |= tsflags << 12;
			}
		}
	}
#else
	mss = 0;
#endif
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	/* Nonzero would_hit_hwbug records entry+1 of the descriptor
	 * whose buffer crosses a 4GB boundary (hardware bug).
	 */
	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = entry + 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len)) {
				/* Only one should match. */
				if (would_hit_hwbug)
					BUG();
				would_hit_hwbug = entry + 1;
			}

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;
		unsigned int len = 0;

		/* Walk back to this skb's first descriptor, then forward
		 * to the guilty one, tracking the fragment length.
		 */
		would_hit_hwbug -= 1;
		entry = entry - 1 - skb_shinfo(skb)->nr_frags;
		entry &= (TG3_TX_RING_SIZE - 1);
		start = entry;
		i = 0;
		while (entry != last_plus_one) {
			if (i == 0)
				len = skb_headlen(skb);
			else
				len = skb_shinfo(skb)->frags[i-1].size;

			if (entry == would_hit_hwbug)
				break;

			i++;
			entry = NEXT_TX(entry);

		}

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_4gb_hwbug_workaround(tp, skb,
						entry, len,
						last_plus_one,
						&start, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

out_unlock:
	mmiowb();
	spin_unlock_irqrestore(&tp->tx_lock, flags);

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3299
3300static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3301 int new_mtu)
3302{
3303 dev->mtu = new_mtu;
3304
3305 if (new_mtu > ETH_DATA_LEN)
3306 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3307 else
3308 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3309}
3310
/* netdev change_mtu hook.  If the interface is running the chip must
 * be halted and re-initialized for the new MTU to take effect.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_netif_stop(tp);
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tg3_halt(tp);

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_init_hw(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
3343
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard-size rx ring: unmap the DMA buffer and free
	 * every SKB still posted to the hardware.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo rx ring: same cleanup with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Tx ring: each SKB occupies one descriptor for its linear
	 * data plus one per page fragment, so advance i across all
	 * of them before freeing the SKB itself.  No i++ in the
	 * for-statement: it is advanced manually below.
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			/* Mask handles wrap-around at the ring end. */
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
3415
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static void tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* Usable length: buffer minus the rx alignment offset
		 * and a 64-byte guard area.
		 */
		rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		/* Opaque cookie lets the rx path find the ring/index
		 * of the completed buffer.
		 */
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Jumbo ring invariants, only when jumbo frames are in use. */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
				       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  Allocation
	 * failure just leaves the ring partially populated.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
			break;
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
				break;
		}
	}
}
3480
3481/*
3482 * Must not be invoked with interrupt sources disabled and
3483 * the hardware shutdown down.
3484 */
3485static void tg3_free_consistent(struct tg3 *tp)
3486{
3487 if (tp->rx_std_buffers) {
3488 kfree(tp->rx_std_buffers);
3489 tp->rx_std_buffers = NULL;
3490 }
3491 if (tp->rx_std) {
3492 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3493 tp->rx_std, tp->rx_std_mapping);
3494 tp->rx_std = NULL;
3495 }
3496 if (tp->rx_jumbo) {
3497 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3498 tp->rx_jumbo, tp->rx_jumbo_mapping);
3499 tp->rx_jumbo = NULL;
3500 }
3501 if (tp->rx_rcb) {
3502 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3503 tp->rx_rcb, tp->rx_rcb_mapping);
3504 tp->rx_rcb = NULL;
3505 }
3506 if (tp->tx_ring) {
3507 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3508 tp->tx_ring, tp->tx_desc_mapping);
3509 tp->tx_ring = NULL;
3510 }
3511 if (tp->hw_status) {
3512 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3513 tp->hw_status, tp->status_mapping);
3514 tp->hw_status = NULL;
3515 }
3516 if (tp->hw_stats) {
3517 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3518 tp->hw_stats, tp->stats_mapping);
3519 tp->hw_stats = NULL;
3520 }
3521}
3522
3523/*
3524 * Must not be invoked with interrupt sources disabled and
3525 * the hardware shutdown down. Can sleep.
3526 */
3527static int tg3_alloc_consistent(struct tg3 *tp)
3528{
3529 tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3530 (TG3_RX_RING_SIZE +
3531 TG3_RX_JUMBO_RING_SIZE)) +
3532 (sizeof(struct tx_ring_info) *
3533 TG3_TX_RING_SIZE),
3534 GFP_KERNEL);
3535 if (!tp->rx_std_buffers)
3536 return -ENOMEM;
3537
3538 memset(tp->rx_std_buffers, 0,
3539 (sizeof(struct ring_info) *
3540 (TG3_RX_RING_SIZE +
3541 TG3_RX_JUMBO_RING_SIZE)) +
3542 (sizeof(struct tx_ring_info) *
3543 TG3_TX_RING_SIZE));
3544
3545 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3546 tp->tx_buffers = (struct tx_ring_info *)
3547 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3548
3549 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3550 &tp->rx_std_mapping);
3551 if (!tp->rx_std)
3552 goto err_out;
3553
3554 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3555 &tp->rx_jumbo_mapping);
3556
3557 if (!tp->rx_jumbo)
3558 goto err_out;
3559
3560 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3561 &tp->rx_rcb_mapping);
3562 if (!tp->rx_rcb)
3563 goto err_out;
3564
3565 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3566 &tp->tx_desc_mapping);
3567 if (!tp->tx_ring)
3568 goto err_out;
3569
3570 tp->hw_status = pci_alloc_consistent(tp->pdev,
3571 TG3_HW_STATUS_SIZE,
3572 &tp->status_mapping);
3573 if (!tp->hw_status)
3574 goto err_out;
3575
3576 tp->hw_stats = pci_alloc_consistent(tp->pdev,
3577 sizeof(struct tg3_hw_stats),
3578 &tp->stats_mapping);
3579 if (!tp->hw_stats)
3580 goto err_out;
3581
3582 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3583 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3584
3585 return 0;
3586
3587err_out:
3588 tg3_free_consistent(tp);
3589 return -ENOMEM;
3590}
3591
/* Maximum poll iterations (100us apiece) when waiting for a block
 * to stop or the transmitter to drain.
 */
#define MAX_WAIT_CNT 1000

/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.  Returns 0 on success, -ENODEV if the
 * enable bit never clears.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
{
	unsigned int i;
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		};
	}

	/* Clear the enable bit and flush the posted write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll until the hardware confirms the block is stopped. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_stop_block timed out, "
		       "ofs=%lx enable_bit=%x\n",
		       ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
3639
/* Shut down all DMA/packet-processing engines in dependency order
 * (receive path, then transmit path, then host coalescing and the
 * memory arbiter) and clear the status/statistics blocks.
 * tp->lock is held.  Returns 0 on success or a negative errno if
 * any block refuses to stop.
 */
static int tg3_abort_hw(struct tg3 *tp)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new work arrives. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks. */
	err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);

	/* Transmit-path blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
	if (err)
		goto out;

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and wait for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		return -ENODEV;
	}

	/* Host coalescing, write DMA and mailbox-free blocks. */
	err = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);

	/* Pulse-reset the flow-through queues. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	/* Buffer manager and memory arbiter go last. */
	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
	if (err)
		goto out;

	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

out:
	return err;
}
3707
3708/* tp->lock is held. */
3709static int tg3_nvram_lock(struct tg3 *tp)
3710{
3711 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3712 int i;
3713
3714 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3715 for (i = 0; i < 8000; i++) {
3716 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3717 break;
3718 udelay(20);
3719 }
3720 if (i == 8000)
3721 return -ENODEV;
3722 }
3723 return 0;
3724}
3725
/* Release the NVRAM software arbitration grant taken by
 * tg3_nvram_lock().  tp->lock is held.
 */
static void tg3_nvram_unlock(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
}
3732
/* Tell the on-chip firmware a reset is about to happen: post the
 * magic value in the firmware mailbox and, on chips using the new
 * ASF handshake, record the driver state for the given reset kind.
 * tp->lock is held.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	/* Sun-built 570X boards have no firmware mailbox. */
	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
		tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		};
	}
}
3762
/* After a reset completes, tell new-handshake ASF firmware that the
 * corresponding pre-reset state has finished.  Note there is no
 * _DONE state for RESET_KIND_SUSPEND.  tp->lock is held.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		};
	}
}
3783
/* Legacy (pre-new-handshake) ASF signalling: record the driver
 * state for firmware on any chip with ASF enabled.  tp->lock is
 * held.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		};
	}
}
3809
3810static void tg3_stop_fw(struct tg3 *);
3811
/* Perform a full core-clock reset of the chip, then restore enough
 * PCI/MAC state to make it usable again and wait for the on-chip
 * firmware to finish booting.  Finally re-probe the ASF
 * configuration, which a reset may have changed.
 * tp->lock is held.  Returns 0 or -ENODEV on firmware timeout.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	u32 flags_save;
	int i;

	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
		tg3_nvram_lock(tp);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	flags_save = tp->tg3_flags;
	tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	/* Keep PHY power on across the reset on 5705+ chips. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround flag */
	tp->tg3_flags = flags_save;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete. */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	pci_restore_state(tp->pdev);

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	/* 5750 A3 workaround: quiesce firmware before proceeding. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore the MAC port mode (TBI for serdes PHYs). */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
		/* Wait for firmware initialization to complete.  The
		 * firmware acknowledges by writing the one's
		 * complement of the magic value posted pre-reset.
		 */
		for (i = 0; i < 100000; i++) {
			tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			udelay(10);
		}
		if (i >= 100000) {
			printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
			       "firmware will not restart magic=%08x\n",
			       tp->dev->name, val);
			return -ENODEV;
		}
	}

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state. */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			/* 5750/5752 use the new ASF handshake. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
3982
/* Ask ASF firmware to pause: post the PAUSE command in the firmware
 * command mailbox, raise the RX CPU event bit and busy-wait briefly
 * for the CPU to acknowledge (best effort; a timeout is ignored).
 * No-op when ASF is not enabled.  tp->lock is held.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		u32 val;
		int i;

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
		val = tr32(GRC_RX_CPU_EVENT);
		val |= (1 << 14);
		tw32(GRC_RX_CPU_EVENT, val);

		/* Wait for RX cpu to ACK the event. */
		for (i = 0; i < 100; i++) {
			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
				break;
			udelay(1);
		}
	}
}
4003
4004/* tp->lock is held. */
4005static int tg3_halt(struct tg3 *tp)
4006{
4007 int err;
4008
4009 tg3_stop_fw(tp);
4010
4011 tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4012
4013 tg3_abort_hw(tp);
4014 err = tg3_chip_reset(tp);
4015
4016 tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4017 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4018
4019 if (err)
4020 return err;
4021
4022 return 0;
4023}
4024
/* Memory layout of the embedded 5701 A0 fix-up firmware image
 * (tg3FwText/tg3FwRodata below).  Addresses are in the RX CPU's
 * address space; only the low 16 bits index the scratch memory.
 * NOTE: "RELASE" typo is historical; name kept for compatibility.
 */
#define TG3_FW_RELEASE_MAJOR 0x0
#define TG3_FW_RELASE_MINOR 0x0
#define TG3_FW_RELEASE_FIX 0x0
#define TG3_FW_START_ADDR 0x08000000
#define TG3_FW_TEXT_ADDR 0x08000000
#define TG3_FW_TEXT_LEN 0x9c0
#define TG3_FW_RODATA_ADDR 0x080009c0
#define TG3_FW_RODATA_LEN 0x60
#define TG3_FW_DATA_ADDR 0x08000a40
#define TG3_FW_DATA_LEN 0x20
#define TG3_FW_SBSS_ADDR 0x08000a60
#define TG3_FW_SBSS_LEN 0xc
#define TG3_FW_BSS_ADDR 0x08000a70
#define TG3_FW_BSS_LEN 0x10
4039
/* Text (code) segment of the firmware image loaded onto the RX CPU
 * by tg3_load_5701_a0_firmware_fix().  Opaque MIPS machine code
 * words supplied by Broadcom; do not edit by hand.
 */
static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
4133
/* Read-only data segment of the firmware image (ASCII string
 * constants packed into words).  Loaded alongside tg3FwText.
 */
static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
4141
/* Data segment of the firmware image.  Compiled out: the loader
 * passes data_data == NULL and writes zeros for this region instead
 * (see tg3_load_firmware_cpu).
 */
#if 0 /* All zeros, don't eat up space with it. */
u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000
};
#endif
4148
/* On-chip scratch memory regions used to hold firmware for the
 * internal RX and TX MIPS CPUs (16KB each).
 */
#define RX_CPU_SCRATCH_BASE 0x30000
#define RX_CPU_SCRATCH_SIZE 0x04000
#define TX_CPU_SCRATCH_BASE 0x34000
#define TX_CPU_SCRATCH_SIZE 0x04000
4153
/* Halt the internal RX or TX CPU at @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  5705+ chips have no TX CPU, so asking to halt it
 * there is a driver bug.  tp->lock is held.  Returns 0 on success,
 * -ENODEV if the CPU never reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	if (offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		BUG();

	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* Force the halt once more and let it settle. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}
	return 0;
}
4192
/* Describes one firmware image for tg3_load_firmware_cpu(): CPU
 * addresses, lengths (bytes) and host pointers for the text, rodata
 * and data segments.  A NULL *_data pointer means "fill the segment
 * with zeros".
 */
struct fw_info {
	unsigned int text_base;
	unsigned int text_len;
	u32 *text_data;
	unsigned int rodata_base;
	unsigned int rodata_len;
	u32 *rodata_data;
	unsigned int data_base;
	unsigned int data_len;
	u32 *data_data;
};
4204
/* Halt the CPU at @cpu_base, clear its scratch memory and copy the
 * firmware segments described by @info into it.  The CPU is left
 * halted; the caller starts it by programming the PC.
 * tp->lock is held.  Returns 0 or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, i;
	u32 orig_tg3_flags = tp->tg3_flags;
	void (*write_op)(struct tg3 *, u32, u32);

	/* 5705+ chips have no TX CPU to load firmware onto. */
	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705.\n",
		       tp->dev->name);
		return -EINVAL;
	}

	/* 5705+ scratch memory is reached through NIC SRAM writes;
	 * older chips use indirect register writes.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* Force use of PCI config space for indirect register
	 * write calls.  (Restored from orig_tg3_flags on exit.)
	 */
	tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

	err = tg3_halt_cpu(tp, cpu_base);
	if (err)
		goto out;

	/* Zero the whole scratch area, then keep the CPU halted
	 * while copying in each segment.  Only the low 16 bits of
	 * the segment base index the scratch memory.  A NULL data
	 * pointer writes zeros for that segment.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->text_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->text_data ?
			  info->text_data[i] : 0));
	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->rodata_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->rodata_data ?
			  info->rodata_data[i] : 0));
	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->data_base & 0xffff) +
			      (i * sizeof(u32))),
			 (info->data_data ?
			  info->data_data[i] : 0));

	err = 0;

out:
	tp->tg3_flags = orig_tg3_flags;
	return err;
}
4264
/* Load the 5701 A0 fix-up firmware (tg3FwText/tg3FwRodata) onto
 * both internal CPUs, then start only the RX CPU at the firmware
 * entry point and verify its PC took the value.
 * tp->lock is held.  Returns 0 or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	int err, i;

	info.text_base = TG3_FW_TEXT_ADDR;
	info.text_len = TG3_FW_TEXT_LEN;
	info.text_data = &tg3FwText[0];
	info.rodata_base = TG3_FW_RODATA_ADDR;
	info.rodata_len = TG3_FW_RODATA_LEN;
	info.rodata_data = &tg3FwRodata[0];
	info.data_base = TG3_FW_DATA_ADDR;
	info.data_len = TG3_FW_DATA_LEN;
	info.data_data = NULL;	/* data segment is all zeros */

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);

	/* Verify the PC stuck; retry a few times with a re-halt. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       TG3_FW_TEXT_ADDR);
		return -ENODEV;
	}
	/* Release the RX CPU from halt to run the new firmware. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
4317
4318#if TG3_TSO_SUPPORT != 0
4319
/* Memory layout of the TSO offload firmware image for 5700-class
 * chips: version, load address, and the base/length of each section
 * (.text/.rodata/.data/.sbss/.bss).  These must agree with the
 * tg3TsoFw* arrays defined below.
 *
 * NOTE(review): "RELASE" in the minor-version macro is a pre-existing
 * typo in the identifier itself; left unchanged since other code may
 * reference it by this spelling.
 */
#define TG3_TSO_FW_RELEASE_MAJOR 0x1
#define TG3_TSO_FW_RELASE_MINOR 0x6
#define TG3_TSO_FW_RELEASE_FIX 0x0
#define TG3_TSO_FW_START_ADDR 0x08000000
#define TG3_TSO_FW_TEXT_ADDR 0x08000000
#define TG3_TSO_FW_TEXT_LEN 0x1aa0
#define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
#define TG3_TSO_FW_RODATA_LEN 0x60
#define TG3_TSO_FW_DATA_ADDR 0x08001b20
#define TG3_TSO_FW_DATA_LEN 0x30
#define TG3_TSO_FW_SBSS_ADDR 0x08001b50
#define TG3_TSO_FW_SBSS_LEN 0x2c
#define TG3_TSO_FW_BSS_ADDR 0x08001b80
#define TG3_TSO_FW_BSS_LEN 0x894
4334
/* TSO firmware .text image for 5700-class chips: an opaque,
 * Broadcom-supplied machine-code blob, sized to hold
 * TG3_TSO_FW_TEXT_LEN bytes of code plus one extra word.
 * Presumably copied to TG3_TSO_FW_TEXT_ADDR by the TSO firmware
 * loader (not visible here) — do not edit by hand.
 */
static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
};
4621
/* TSO firmware read-only data (.rodata) section.  The non-zero words
 * decode as ASCII identifier strings embedded in the firmware (e.g.
 * 0x4d61696e 0x43707542 == "MainCpuB").  Sized/placed per
 * TG3_TSO_FW_RODATA_ADDR / TG3_TSO_FW_RODATA_LEN.
 */
static u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
4629
/* TSO firmware initialized-data (.data) section.  Words 1-4 decode as
 * the ASCII version tag "stkoffld_v1.6.0", matching the
 * TG3_TSO_FW_RELEASE_* 1.6.0 version macros.  Sized/placed per
 * TG3_TSO_FW_DATA_ADDR / TG3_TSO_FW_DATA_LEN.
 */
static u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
4635
/* 5705 needs a special version of the TSO firmware. */
/* Memory layout of the 5705 TSO firmware image: version, load
 * address, and the base/length of each section.  The 5705 image lives
 * at 0x00010000 rather than 0x08000000 and is smaller than the
 * 5700-class image.
 *
 * NOTE(review): "RELASE" in the minor-version macro is a pre-existing
 * typo in the identifier itself; left unchanged since other code may
 * reference it by this spelling.
 */
#define TG3_TSO5_FW_RELEASE_MAJOR 0x1
#define TG3_TSO5_FW_RELASE_MINOR 0x2
#define TG3_TSO5_FW_RELEASE_FIX 0x0
#define TG3_TSO5_FW_START_ADDR 0x00010000
#define TG3_TSO5_FW_TEXT_ADDR 0x00010000
#define TG3_TSO5_FW_TEXT_LEN 0xe90
#define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
#define TG3_TSO5_FW_RODATA_LEN 0x50
#define TG3_TSO5_FW_DATA_ADDR 0x00010f00
#define TG3_TSO5_FW_DATA_LEN 0x20
#define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
#define TG3_TSO5_FW_SBSS_LEN 0x28
#define TG3_TSO5_FW_BSS_ADDR 0x00010f50
#define TG3_TSO5_FW_BSS_LEN 0x88
4651
4652static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4653 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4654 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4655 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4656 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4657 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4658 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4659 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4660 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4661 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4662 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4663 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4664 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4665 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4666 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4667 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4668 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4669 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4670 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4671 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4672 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4673 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4674 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4675 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4676 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4677 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4678 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4679 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4680 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4681 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4682 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4683 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4684 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4685 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4686 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4687 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4688 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4689 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4690 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4691 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4692 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4693 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4694 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4695 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4696 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4697 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4698 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4699 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4700 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4701 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4702 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4703 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4704 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4705 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4706 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4707 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4708 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4709 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4710 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4711 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4712 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4713 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4714 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4715 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4716 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4717 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4718 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4719 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4720 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4721 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4722 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4723 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4724 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4725 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4726 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4727 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4728 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4729 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4730 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4731 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4732 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4733 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4734 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4735 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4736 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4737 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4738 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4739 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4740 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4741 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4742 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4743 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4744 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4745 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4746 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4747 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4748 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4749 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4750 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4751 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4752 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4753 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4754 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4755 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4756 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4757 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4758 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4759 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4760 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4761 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4762 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4763 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4764 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4765 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4766 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4767 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4768 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4769 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4770 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4771 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4772 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4773 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4774 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4775 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4776 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4777 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4778 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4779 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4780 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4781 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4782 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4783 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4784 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4785 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4786 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4787 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4788 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4789 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4790 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4791 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4792 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4793 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4794 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4795 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4796 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4797 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4798 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4799 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4800 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4801 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4802 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4803 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4804 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4805 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4806 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4807 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4808 0x00000000, 0x00000000, 0x00000000,
4809};
4810
/* Read-only data segment of the 5705 TSO firmware image.  The non-zero
 * words are ASCII tags embedded by the firmware build ("MainCpuB",
 * "MainCpuA", "stkoffld", "fatalErr"), zero-padded out to
 * TG3_TSO5_FW_RODATA_LEN bytes.
 */
static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
4817
/* Initialized data segment of the 5705 TSO firmware.  Contains the
 * version string "stkoffld_v1.2.0" encoded as big-endian words,
 * zero-padded to TG3_TSO5_FW_DATA_LEN bytes.
 */
static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
4822
/* tp->lock is held. */
/* Download the software-TSO helper firmware into one of the chip's
 * on-board CPUs and start it executing.  5705-class chips run it on
 * the RX CPU, using the start of the mbuf pool SRAM as scratch space
 * (tg3_reset_hw carves the same region out of the pool); other
 * TSO-capable chips use the TX CPU and its dedicated scratch memory.
 * Chips with hardware TSO (TG3_FLG2_HW_TSO) need no firmware at all.
 *
 * Returns 0 on success, an error from tg3_load_firmware_cpu(), or
 * -ENODEV if the CPU's program counter cannot be forced to the
 * firmware entry point.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Hardware TSO is done in silicon; nothing to download. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch must cover text+rodata+data plus the sbss/bss
		 * areas the firmware expects to find zeroed after them.
		 */
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.text_base);

	/* Verify the PC took; if not, re-halt the CPU, re-poke the PC,
	 * and give it a millisecond before checking again.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear CPU_MODE (drops CPU_MODE_HALT) so the firmware runs. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE, 0x00000000);
	return 0;
}
4894
4895#endif /* TG3_TSO_SUPPORT != 0 */
4896
4897/* tp->lock is held. */
4898static void __tg3_set_mac_addr(struct tg3 *tp)
4899{
4900 u32 addr_high, addr_low;
4901 int i;
4902
4903 addr_high = ((tp->dev->dev_addr[0] << 8) |
4904 tp->dev->dev_addr[1]);
4905 addr_low = ((tp->dev->dev_addr[2] << 24) |
4906 (tp->dev->dev_addr[3] << 16) |
4907 (tp->dev->dev_addr[4] << 8) |
4908 (tp->dev->dev_addr[5] << 0));
4909 for (i = 0; i < 4; i++) {
4910 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4911 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4912 }
4913
4914 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4915 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4916 for (i = 0; i < 12; i++) {
4917 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4918 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4919 }
4920 }
4921
4922 addr_high = (tp->dev->dev_addr[0] +
4923 tp->dev->dev_addr[1] +
4924 tp->dev->dev_addr[2] +
4925 tp->dev->dev_addr[3] +
4926 tp->dev->dev_addr[4] +
4927 tp->dev->dev_addr[5]) &
4928 TX_BACKOFF_SEED_MASK;
4929 tw32(MAC_TX_BACKOFF_SEED, addr_high);
4930}
4931
4932static int tg3_set_mac_addr(struct net_device *dev, void *p)
4933{
4934 struct tg3 *tp = netdev_priv(dev);
4935 struct sockaddr *addr = p;
4936
4937 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4938
4939 spin_lock_irq(&tp->lock);
4940 __tg3_set_mac_addr(tp);
4941 spin_unlock_irq(&tp->lock);
4942
4943 return 0;
4944}
4945
4946/* tp->lock is held. */
4947static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4948 dma_addr_t mapping, u32 maxlen_flags,
4949 u32 nic_addr)
4950{
4951 tg3_write_mem(tp,
4952 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4953 ((u64) mapping >> 32));
4954 tg3_write_mem(tp,
4955 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4956 ((u64) mapping & 0xffffffff));
4957 tg3_write_mem(tp,
4958 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4959 maxlen_flags);
4960
4961 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4962 tg3_write_mem(tp,
4963 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4964 nic_addr);
4965}
4966
4967static void __tg3_set_rx_mode(struct net_device *);
4968
4969/* tp->lock is held. */
4970static int tg3_reset_hw(struct tg3 *tp)
4971{
4972 u32 val, rdmac_mode;
4973 int i, err, limit;
4974
4975 tg3_disable_ints(tp);
4976
4977 tg3_stop_fw(tp);
4978
4979 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4980
4981 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4982 err = tg3_abort_hw(tp);
4983 if (err)
4984 return err;
4985 }
4986
4987 err = tg3_chip_reset(tp);
4988 if (err)
4989 return err;
4990
4991 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4992
4993 /* This works around an issue with Athlon chipsets on
4994 * B3 tigon3 silicon. This bit has no effect on any
4995 * other revision. But do not set this on PCI Express
4996 * chips.
4997 */
4998 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4999 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
5000 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5001
5002 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5003 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
5004 val = tr32(TG3PCI_PCISTATE);
5005 val |= PCISTATE_RETRY_SAME_DMA;
5006 tw32(TG3PCI_PCISTATE, val);
5007 }
5008
5009 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5010 /* Enable some hw fixes. */
5011 val = tr32(TG3PCI_MSI_DATA);
5012 val |= (1 << 26) | (1 << 28) | (1 << 29);
5013 tw32(TG3PCI_MSI_DATA, val);
5014 }
5015
5016 /* Descriptor ring init may make accesses to the
5017 * NIC SRAM area to setup the TX descriptors, so we
5018 * can only do this after the hardware has been
5019 * successfully reset.
5020 */
5021 tg3_init_rings(tp);
5022
5023 /* This value is determined during the probe time DMA
5024 * engine test, tg3_test_dma.
5025 */
5026 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5027
5028 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5029 GRC_MODE_4X_NIC_SEND_RINGS |
5030 GRC_MODE_NO_TX_PHDR_CSUM |
5031 GRC_MODE_NO_RX_PHDR_CSUM);
5032 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5033 if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5034 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5035 if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5036 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5037
5038 tw32(GRC_MODE,
5039 tp->grc_mode |
5040 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5041
5042 /* Setup the timer prescalar register. Clock is always 66Mhz. */
5043 val = tr32(GRC_MISC_CFG);
5044 val &= ~0xff;
5045 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5046 tw32(GRC_MISC_CFG, val);
5047
5048 /* Initialize MBUF/DESC pool. */
2052da94
JL
5049 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5050 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
1da177e4
LT
5051 /* Do nothing. */
5052 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5053 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5054 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5055 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5056 else
5057 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5058 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5059 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5060 }
5061#if TG3_TSO_SUPPORT != 0
5062 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5063 int fw_len;
5064
5065 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5066 TG3_TSO5_FW_RODATA_LEN +
5067 TG3_TSO5_FW_DATA_LEN +
5068 TG3_TSO5_FW_SBSS_LEN +
5069 TG3_TSO5_FW_BSS_LEN);
5070 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5071 tw32(BUFMGR_MB_POOL_ADDR,
5072 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5073 tw32(BUFMGR_MB_POOL_SIZE,
5074 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5075 }
5076#endif
5077
5078 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5079 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5080 tp->bufmgr_config.mbuf_read_dma_low_water);
5081 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5082 tp->bufmgr_config.mbuf_mac_rx_low_water);
5083 tw32(BUFMGR_MB_HIGH_WATER,
5084 tp->bufmgr_config.mbuf_high_water);
5085 } else {
5086 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5087 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5088 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5089 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5090 tw32(BUFMGR_MB_HIGH_WATER,
5091 tp->bufmgr_config.mbuf_high_water_jumbo);
5092 }
5093 tw32(BUFMGR_DMA_LOW_WATER,
5094 tp->bufmgr_config.dma_low_water);
5095 tw32(BUFMGR_DMA_HIGH_WATER,
5096 tp->bufmgr_config.dma_high_water);
5097
5098 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5099 for (i = 0; i < 2000; i++) {
5100 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5101 break;
5102 udelay(10);
5103 }
5104 if (i >= 2000) {
5105 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5106 tp->dev->name);
5107 return -ENODEV;
5108 }
5109
5110 /* Setup replenish threshold. */
5111 tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5112
5113 /* Initialize TG3_BDINFO's at:
5114 * RCVDBDI_STD_BD: standard eth size rx ring
5115 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
5116 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
5117 *
5118 * like so:
5119 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
5120 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
5121 * ring attribute flags
5122 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
5123 *
5124 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5125 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5126 *
5127 * The size of each ring is fixed in the firmware, but the location is
5128 * configurable.
5129 */
5130 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5131 ((u64) tp->rx_std_mapping >> 32));
5132 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5133 ((u64) tp->rx_std_mapping & 0xffffffff));
5134 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5135 NIC_SRAM_RX_BUFFER_DESC);
5136
5137 /* Don't even try to program the JUMBO/MINI buffer descriptor
5138 * configs on 5705.
5139 */
5140 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5141 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5142 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5143 } else {
5144 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5145 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5146
5147 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5148 BDINFO_FLAGS_DISABLED);
5149
5150 /* Setup replenish threshold. */
5151 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5152
5153 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5154 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5155 ((u64) tp->rx_jumbo_mapping >> 32));
5156 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5157 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5158 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5159 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5160 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5161 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5162 } else {
5163 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5164 BDINFO_FLAGS_DISABLED);
5165 }
5166
5167 }
5168
5169 /* There is only one send ring on 5705/5750, no need to explicitly
5170 * disable the others.
5171 */
5172 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5173 /* Clear out send RCB ring in SRAM. */
5174 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5175 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5176 BDINFO_FLAGS_DISABLED);
5177 }
5178
5179 tp->tx_prod = 0;
5180 tp->tx_cons = 0;
5181 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5182 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5183
5184 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5185 tp->tx_desc_mapping,
5186 (TG3_TX_RING_SIZE <<
5187 BDINFO_FLAGS_MAXLEN_SHIFT),
5188 NIC_SRAM_TX_BUFFER_DESC);
5189
5190 /* There is only one receive return ring on 5705/5750, no need
5191 * to explicitly disable the others.
5192 */
5193 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5194 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5195 i += TG3_BDINFO_SIZE) {
5196 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5197 BDINFO_FLAGS_DISABLED);
5198 }
5199 }
5200
5201 tp->rx_rcb_ptr = 0;
5202 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5203
5204 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5205 tp->rx_rcb_mapping,
5206 (TG3_RX_RCB_RING_SIZE(tp) <<
5207 BDINFO_FLAGS_MAXLEN_SHIFT),
5208 0);
5209
5210 tp->rx_std_ptr = tp->rx_pending;
5211 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5212 tp->rx_std_ptr);
5213
5214 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5215 tp->rx_jumbo_pending : 0;
5216 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5217 tp->rx_jumbo_ptr);
5218
5219 /* Initialize MAC address and backoff seed. */
5220 __tg3_set_mac_addr(tp);
5221
5222 /* MTU + ethernet header + FCS + optional VLAN tag */
5223 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5224
5225 /* The slot time is changed by tg3_setup_phy if we
5226 * run at gigabit with half duplex.
5227 */
5228 tw32(MAC_TX_LENGTHS,
5229 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5230 (6 << TX_LENGTHS_IPG_SHIFT) |
5231 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5232
5233 /* Receive rules. */
5234 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5235 tw32(RCVLPC_CONFIG, 0x0181);
5236
5237 /* Calculate RDMAC_MODE setting early, we need it to determine
5238 * the RCVLPC_STATE_ENABLE mask.
5239 */
5240 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5241 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5242 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5243 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5244 RDMAC_MODE_LNGREAD_ENAB);
5245 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5246 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5247 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5248 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
2052da94
JL
5249 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)) {
1da177e4
LT
5251 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5252 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5253 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5254 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5255 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5256 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5257 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5258 }
5259 }
5260
5261#if TG3_TSO_SUPPORT != 0
5262 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5263 rdmac_mode |= (1 << 27);
5264#endif
5265
5266 /* Receive/send statistics. */
5267 if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5268 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5269 val = tr32(RCVLPC_STATS_ENABLE);
5270 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5271 tw32(RCVLPC_STATS_ENABLE, val);
5272 } else {
5273 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5274 }
5275 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5276 tw32(SNDDATAI_STATSENAB, 0xffffff);
5277 tw32(SNDDATAI_STATSCTRL,
5278 (SNDDATAI_SCTRL_ENABLE |
5279 SNDDATAI_SCTRL_FASTUPD));
5280
5281 /* Setup host coalescing engine. */
5282 tw32(HOSTCC_MODE, 0);
5283 for (i = 0; i < 2000; i++) {
5284 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5285 break;
5286 udelay(10);
5287 }
5288
5289 tw32(HOSTCC_RXCOL_TICKS, 0);
5290 tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5291 tw32(HOSTCC_RXMAX_FRAMES, 1);
5292 tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5293 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5294 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5295 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5296 }
5297 tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5298 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5299
5300 /* set status block DMA address */
5301 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5302 ((u64) tp->status_mapping >> 32));
5303 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5304 ((u64) tp->status_mapping & 0xffffffff));
5305
5306 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5307 /* Status/statistics block address. See tg3_timer,
5308 * the tg3_periodic_fetch_stats call there, and
5309 * tg3_get_stats to see how this works for 5705/5750 chips.
5310 */
5311 tw32(HOSTCC_STAT_COAL_TICKS,
5312 DEFAULT_STAT_COAL_TICKS);
5313 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5314 ((u64) tp->stats_mapping >> 32));
5315 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5316 ((u64) tp->stats_mapping & 0xffffffff));
5317 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5318 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5319 }
5320
5321 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5322
5323 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5324 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5325 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5326 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5327
5328 /* Clear statistics/status block in chip, and status block in ram. */
5329 for (i = NIC_SRAM_STATS_BLK;
5330 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5331 i += sizeof(u32)) {
5332 tg3_write_mem(tp, i, 0);
5333 udelay(40);
5334 }
5335 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5336
5337 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5338 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5339 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5340 udelay(40);
5341
5342 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5344 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5345 GRC_LCLCTRL_GPIO_OUTPUT1);
5346 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5347 udelay(100);
5348
5349 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5350 tr32(MAILBOX_INTERRUPT_0);
5351
5352 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5353 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5354 udelay(40);
5355 }
5356
5357 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5358 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5359 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5360 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5361 WDMAC_MODE_LNGREAD_ENAB);
5362
5363 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5364 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
2052da94
JL
5365 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
5366 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)) {
1da177e4
LT
5367 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
5368 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5369 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5370 /* nothing */
5371 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5372 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5373 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5374 val |= WDMAC_MODE_RX_ACCEL;
5375 }
5376 }
5377
5378 tw32_f(WDMAC_MODE, val);
5379 udelay(40);
5380
5381 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5382 val = tr32(TG3PCI_X_CAPS);
5383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5384 val &= ~PCIX_CAPS_BURST_MASK;
5385 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5386 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5387 val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5388 val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5389 if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5390 val |= (tp->split_mode_max_reqs <<
5391 PCIX_CAPS_SPLIT_SHIFT);
5392 }
5393 tw32(TG3PCI_X_CAPS, val);
5394 }
5395
5396 tw32_f(RDMAC_MODE, rdmac_mode);
5397 udelay(40);
5398
5399 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5400 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5401 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5402 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5403 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5404 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5405 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5406 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5407#if TG3_TSO_SUPPORT != 0
5408 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5409 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5410#endif
5411 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5412 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5413
5414 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5415 err = tg3_load_5701_a0_firmware_fix(tp);
5416 if (err)
5417 return err;
5418 }
5419
5420#if TG3_TSO_SUPPORT != 0
5421 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5422 err = tg3_load_tso_firmware(tp);
5423 if (err)
5424 return err;
5425 }
5426#endif
5427
5428 tp->tx_mode = TX_MODE_ENABLE;
5429 tw32_f(MAC_TX_MODE, tp->tx_mode);
5430 udelay(100);
5431
5432 tp->rx_mode = RX_MODE_ENABLE;
5433 tw32_f(MAC_RX_MODE, tp->rx_mode);
5434 udelay(10);
5435
5436 if (tp->link_config.phy_is_low_power) {
5437 tp->link_config.phy_is_low_power = 0;
5438 tp->link_config.speed = tp->link_config.orig_speed;
5439 tp->link_config.duplex = tp->link_config.orig_duplex;
5440 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5441 }
5442
5443 tp->mi_mode = MAC_MI_MODE_BASE;
5444 tw32_f(MAC_MI_MODE, tp->mi_mode);
5445 udelay(80);
5446
5447 tw32(MAC_LED_CTRL, tp->led_ctrl);
5448
5449 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5450 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5451 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5452 udelay(10);
5453 }
5454 tw32_f(MAC_RX_MODE, tp->rx_mode);
5455 udelay(10);
5456
5457 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5458 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5459 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5460 /* Set drive transmission level to 1.2V */
5461 /* only if the signal pre-emphasis bit is not set */
5462 val = tr32(MAC_SERDES_CFG);
5463 val &= 0xfffff000;
5464 val |= 0x880;
5465 tw32(MAC_SERDES_CFG, val);
5466 }
5467 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5468 tw32(MAC_SERDES_CFG, 0x616000);
5469 }
5470
5471 /* Prevent chip from dropping frames when flow control
5472 * is enabled.
5473 */
5474 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5475
5476 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5477 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5478 /* Use hardware link auto-negotiation */
5479 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5480 }
5481
5482 err = tg3_setup_phy(tp, 1);
5483 if (err)
5484 return err;
5485
5486 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5487 u32 tmp;
5488
5489 /* Clear CRC stats. */
5490 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5491 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5492 tg3_readphy(tp, 0x14, &tmp);
5493 }
5494 }
5495
5496 __tg3_set_rx_mode(tp->dev);
5497
5498 /* Initialize receive rules. */
5499 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
5500 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5501 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
5502 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5503
5504 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5505 limit = 8;
5506 else
5507 limit = 16;
5508 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5509 limit -= 4;
5510 switch (limit) {
5511 case 16:
5512 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
5513 case 15:
5514 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
5515 case 14:
5516 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
5517 case 13:
5518 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
5519 case 12:
5520 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
5521 case 11:
5522 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
5523 case 10:
5524 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
5525 case 9:
5526 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
5527 case 8:
5528 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
5529 case 7:
5530 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
5531 case 6:
5532 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
5533 case 5:
5534 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
5535 case 4:
5536 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
5537 case 3:
5538 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
5539 case 2:
5540 case 1:
5541
5542 default:
5543 break;
5544 };
5545
5546 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5547
5548 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5549 tg3_enable_ints(tp);
5550
5551 return 0;
5552}
5553
5554/* Called at device open time to get the chip ready for
5555 * packet processing. Invoked with tp->lock held.
5556 */
5557static int tg3_init_hw(struct tg3 *tp)
5558{
5559 int err;
5560
5561 /* Force the chip into D0. */
5562 err = tg3_set_power_state(tp, 0);
5563 if (err)
5564 goto out;
5565
5566 tg3_switch_clocks(tp);
5567
5568 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5569
5570 err = tg3_reset_hw(tp);
5571
5572out:
5573 return err;
5574}
5575
/* Fold the 32-bit hardware counter at REG into the 64-bit {high,low}
 * software counter PSTAT, carrying manually: if the low word wrapped
 * during the add, bump the high word.
 * NOTE(review): accumulating (rather than overwriting) assumes the
 * stats register reads back a delta (clear-on-read) — confirm against
 * the chip documentation.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do { u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
5582
/* Accumulate the MAC's 32-bit TX/RX statistics registers into the
 * 64-bit software counters in tp->hw_stats.  Used on chips that do
 * not DMA a statistics block to host memory (see the 5705_PLUS
 * branch of the HOSTCC setup in tg3_reset_hw); called from tg3_timer.
 * Does nothing while the link is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
}
5619
/* Driver watchdog, re-armed every tp->timer_offset jiffies while the
 * device is open.  It kicks the chip so pending status-block updates
 * raise an interrupt, detects a wedged write-DMA engine, refreshes the
 * statistics mirror on 5705+ parts, polls for link changes once per
 * second and emits the ASF firmware heartbeat every 120 seconds.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);
	spin_lock(&tp->tx_lock);

	/* All of this garbage is because when using non-tagged
	 * IRQ status the mailbox/status_block protocol the chip
	 * uses with the cpu is race prone.
	 */
	if (tp->hw_status->status & SD_STATUS_UPDATED) {
		/* Work is pending in the status block: force the chip to
		 * assert the interrupt line again so the ISR services it.
		 */
		tw32(GRC_LOCAL_CTRL,
		     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	} else {
		/* Otherwise ask host coalescing for an immediate update. */
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
	}

	if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		/* Write DMA engine stopped unexpectedly: request a full
		 * chip reset from process context and do NOT re-arm the
		 * timer here -- the reset task restarts it.
		 */
		tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
		spin_unlock(&tp->tx_lock);
		spin_unlock_irqrestore(&tp->lock, flags);
		schedule_work(&tp->reset_task);
		return;
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		tg3_periodic_fetch_stats(tp);

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Link events come either from the MI interrupt
			 * status bit or the link-state-changed bit.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Carrier up but the PCS says the link changed,
			 * or carrier down while sync/signal is present:
			 * renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				/* Briefly clear the port mode to reset the
				 * link state machine before reconfiguring.
				 */
				tw32_f(MAC_MODE,
				     (tp->mac_mode &
				      ~MAC_MODE_PORT_MODE_MASK));
				udelay(40);
				tw32_f(MAC_MODE, tp->mac_mode);
				udelay(40);
				tg3_setup_phy(tp, 0);
			}
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 120 seconds. */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			/* Post an ALIVE command to the firmware mailbox and
			 * ring the RX CPU event doorbell (bit 14).
			 */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->tx_lock);
	spin_unlock_irqrestore(&tp->lock, flags);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
5716
/* net_device open hook: allocate DMA-consistent descriptor memory,
 * install the (shared) interrupt handler, program the hardware, start
 * the watchdog timer and finally enable interrupts and the TX queue.
 * Returns 0 on success or a negative errno; on any failure every
 * resource acquired so far is released before returning.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Lock order throughout the driver: tp->lock then tp->tx_lock. */
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	/* Quiesce the chip before touching ring memory. */
	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	err = request_irq(dev->irq, tg3_interrupt,
			  SA_SHIRQ, dev->name, dev);

	if (err) {
		tg3_free_consistent(tp);
		return err;
	}

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	err = tg3_init_hw(tp);
	if (err) {
		/* Hardware bring-up failed: halt and drop the rings. */
		tg3_halt(tp);
		tg3_free_rings(tp);
	} else {
		/* Timer fires every HZ/10; counter of 10 gives the
		 * once-per-second link poll, 10 * 120 ticks the 120 s
		 * ASF heartbeat (see tg3_timer()).
		 */
		tp->timer_offset = HZ / 10;
		tp->timer_counter = tp->timer_multiplier = 10;
		tp->asf_counter = tp->asf_multiplier = (10 * 120);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
		add_timer(&tp->timer);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	}

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	if (err) {
		free_irq(dev->irq, dev);
		tg3_free_consistent(tp);
		return err;
	}

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tg3_enable_ints(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	netif_start_queue(dev);

	return 0;
}
5788
#if 0
/* Debug-only register/state dump, normally compiled out.  Walks every
 * major hardware block (MAC, send/receive engines, DMA, GRC), the
 * NIC-side SRAM control blocks, the host status/statistics blocks and
 * the first few NIC-resident TX/RX descriptors, printing each to the
 * kernel log.  Caller must hold the device quiescent; this is intended
 * to be invoked from tg3_close() when hand-debugging.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	/* NIC-internal SRAM ring control blocks. */
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
6016
6017static struct net_device_stats *tg3_get_stats(struct net_device *);
6018static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6019
/* net_device stop hook: stop the TX queue and watchdog timer, halt the
 * chip under both driver locks, then release the IRQ.  Because closing
 * resets the hardware counters, the cumulative statistics are snapshot
 * into net_stats_prev/estats_prev before the DMA memory (which holds
 * the hardware stats block) is freed, so totals survive across
 * open/close cycles.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Timer must be dead before we tear down what it touches. */
	del_timer_sync(&tp->timer);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp);
	tg3_free_rings(tp);
	tp->tg3_flags &=
		~(TG3_FLAG_INIT_COMPLETE |
		  TG3_FLAG_GOT_SERDES_FLOWCTL);
	netif_carrier_off(tp->dev);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	free_irq(dev->irq, dev);

	/* Preserve running totals before the hw stats block goes away. */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	return 0;
}
6057
6058static inline unsigned long get_stat64(tg3_stat64_t *val)
6059{
6060 unsigned long ret;
6061
6062#if (BITS_PER_LONG == 32)
6063 ret = val->low;
6064#else
6065 ret = ((u64)val->high << 32) | ((u64)val->low);
6066#endif
6067 return ret;
6068}
6069
/* Return the cumulative RX CRC error count.  On 5700/5701 chips with a
 * copper PHY the count is taken from a PHY counter and accumulated in
 * software in tp->phy_crc_errors; on everything else the hardware
 * rx_fcs_errors statistic is used directly.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		unsigned long flags;
		u32 val;

		spin_lock_irqsave(&tp->lock, flags);
		/* NOTE(review): 0x1e/0x14 look like vendor-specific PHY
		 * shadow/counter registers (setting bit 15 of 0x1e appears
		 * to enable the counter read at 0x14).  Magic values kept
		 * as-is -- confirm against the PHY datasheet.
		 */
		if (!tg3_readphy(tp, 0x1e, &val)) {
			tg3_writephy(tp, 0x1e, val | 0x8000);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_irqrestore(&tp->lock, flags);

		/* Accumulate; presumably the PHY counter clears on read --
		 * TODO confirm.
		 */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
6095
/* Add one 64-bit hardware counter on top of the total saved at the
 * last close, storing the sum in the ethtool stats snapshot.
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_stat64(&hw_stats->member)

/* Build the full ethtool statistics block.  Hardware counters reset on
 * chip reset, so each entry is the live hardware value plus the total
 * preserved in tp->estats_prev by tg3_close().  If the statistics DMA
 * block is not mapped (device never opened), the saved totals are
 * returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
6187
/* net_device get_stats hook: synthesize the standard netdev statistics
 * from the hardware counters, adding the totals saved at the last
 * close (hardware counters reset with the chip).  Falls back to the
 * saved totals when the statistics DMA block is not mapped.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the per-cast-type counters. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors) +
		get_stat64(&hw_stats->rx_discards);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors need chip-specific handling, see calc_crc_errors(). */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	return stats;
}
6245
6246static inline u32 calc_crc(unsigned char *buf, int len)
6247{
6248 u32 reg;
6249 u32 tmp;
6250 int j, k;
6251
6252 reg = 0xffffffff;
6253
6254 for (j = 0; j < len; j++) {
6255 reg ^= buf[j];
6256
6257 for (k = 0; k < 8; k++) {
6258 tmp = reg & 0x01;
6259
6260 reg >>= 1;
6261
6262 if (tmp) {
6263 reg ^= 0xedb88320;
6264 }
6265 }
6266 }
6267
6268 return ~reg;
6269}
6270
6271static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6272{
6273 /* accept or reject all multicast frames */
6274 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6275 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6276 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6277 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6278}
6279
/* Apply the device's promiscuous/multicast configuration to the MAC.
 * Caller must hold tp->lock and tp->tx_lock.  Builds the RX mode word
 * and the 128-bit multicast hash filter, touching MAC_RX_MODE only
 * when the mode word actually changed.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* The hash bit index is the inverted low 7 bits of
			 * the Ethernet CRC of the address: bits 6:5 select
			 * one of the four 32-bit registers, bits 4:0 the
			 * bit within it.
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
6343
/* net_device set_multicast_list hook: take both driver locks and apply
 * the current RX filtering configuration (see __tg3_set_rx_mode()).
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	__tg3_set_rx_mode(dev);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
}
6354
/* Size in bytes of the register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN (32 * 1024)

/* ethtool hook: buffer size the caller must provide for a register dump. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
6361
/* ethtool hook: dump device registers into the caller's buffer.  Each
 * register range is copied at its native chip offset within the zeroed
 * TG3_REGDUMP_LEN buffer, so offsets in the dump match the hardware
 * register map and un-dumped gaps read as zero.  Both driver locks are
 * held so the snapshot is coherent.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

/* Read one register into the output cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Position the cursor at the range's native offset, then read `len`
 * bytes' worth of consecutive registers.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Same, for a single register. */
#define GET_REG32_1(reg)		\
do {	p = (u32 *)(orig_p + (reg));	\
	__GET_REG32((reg));		\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_LOOP(RX_CPU_BASE, 0x280);
	GET_REG32_LOOP(TX_CPU_BASE, 0x280);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* NVRAM registers only exist on parts with real NVRAM attached. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
}
6428
/* ethtool hook: report the NVRAM size (probed at driver init). */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
6435
6436static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6437
/* ethtool hook: read an arbitrary byte range from NVRAM.  NVRAM is
 * accessed in aligned 4-byte words, so the request is split into an
 * unaligned head, an aligned middle, and an unaligned tail; each word
 * is byte-swapped to little-endian so data[] is a plain byte image of
 * the NVRAM contents.  eeprom->len is updated to the number of bytes
 * actually copied.  Returns 0 or a negative errno from the NVRAM read.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Byte image in LE order; copy only the requested bytes. */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
6496
6497static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
6498
6499static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6500{
6501 struct tg3 *tp = netdev_priv(dev);
6502 int ret;
6503 u32 offset, len, b_offset, odd_len, start, end;
6504 u8 *buf;
6505
6506 if (eeprom->magic != TG3_EEPROM_MAGIC)
6507 return -EINVAL;
6508
6509 offset = eeprom->offset;
6510 len = eeprom->len;
6511
6512 if ((b_offset = (offset & 3))) {
6513 /* adjustments to start on required 4 byte boundary */
6514 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6515 if (ret)
6516 return ret;
6517 start = cpu_to_le32(start);
6518 len += b_offset;
6519 offset &= ~3;
6520 }
6521
6522 odd_len = 0;
6523 if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6524 /* adjustments to end on required 4 byte boundary */
6525 odd_len = 1;
6526 len = (len + 3) & ~3;
6527 ret = tg3_nvram_read(tp, offset+len-4, &end);
6528 if (ret)
6529 return ret;
6530 end = cpu_to_le32(end);
6531 }
6532
6533 buf = data;
6534 if (b_offset || odd_len) {
6535 buf = kmalloc(len, GFP_KERNEL);
6536 if (buf == 0)
6537 return -ENOMEM;
6538 if (b_offset)
6539 memcpy(buf, &start, 4);
6540 if (odd_len)
6541 memcpy(buf+len-4, &end, 4);
6542 memcpy(buf + b_offset, data, eeprom->len);
6543 }
6544
6545 ret = tg3_nvram_write_block(tp, offset, len, buf);
6546
6547 if (buf != data)
6548 kfree(buf);
6549
6550 return ret;
6551}
6552
/* ethtool hook: report supported link modes, current advertisement and
 * (when the interface is up) the active speed/duplex.  Always returns 0.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the part is a 10/100-only variant. */
	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_MII);
	else
		cmd->supported |= SUPPORTED_FIBRE;

	cmd->advertising = tp->link_config.advertising;
	/* Active speed/duplex is only meaningful while the device is up. */
	if (netif_running(dev)) {
		cmd->speed = tp->link_config.active_speed;
		cmd->duplex = tp->link_config.active_duplex;
	}
	/* NOTE(review): port is reported as 0 (PORT_TP) even for SERDES
	 * parts; presumably should distinguish PORT_FIBRE -- confirm.
	 */
	cmd->port = 0;
	cmd->phy_address = PHY_ADDR;
	cmd->transceiver = 0;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
6585
/* ethtool hook: change link configuration.  Validates the requested
 * advertisement on SERDES parts, records the new autoneg/speed/duplex
 * request under both driver locks, and renegotiates immediately if the
 * interface is up.  Returns 0 or -EINVAL.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		/* These are the only valid advertisement bits allowed.  */
		if (cmd->autoneg == AUTONEG_ENABLE &&
		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
					  ADVERTISED_1000baseT_Full |
					  ADVERTISED_Autoneg |
					  ADVERTISED_FIBRE)))
			return -EINVAL;
	}

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Autoneg: advertisement drives the link, forced values
		 * are invalidated.
		 */
		tp->link_config.advertising = cmd->advertising;
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		/* Forced mode: speed/duplex drive the link. */
		tp->link_config.advertising = 0;
		tp->link_config.speed = cmd->speed;
		tp->link_config.duplex = cmd->duplex;
	}

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
6622
6623static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6624{
6625 struct tg3 *tp = netdev_priv(dev);
6626
6627 strcpy(info->driver, DRV_MODULE_NAME);
6628 strcpy(info->version, DRV_MODULE_VERSION);
6629 strcpy(info->bus_info, pci_name(tp->pdev));
6630}
6631
6632static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6633{
6634 struct tg3 *tp = netdev_priv(dev);
6635
6636 wol->supported = WAKE_MAGIC;
6637 wol->wolopts = 0;
6638 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6639 wol->wolopts = WAKE_MAGIC;
6640 memset(&wol->sopass, 0, sizeof(wol->sopass));
6641}
6642
/* ethtool hook: enable/disable magic-packet Wake-on-LAN.  Rejects any
 * other wake option, and rejects WoL on SERDES parts that lack the
 * WoL capability.  The flag update is done under tp->lock; the actual
 * hardware programming happens at suspend time.  Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	if ((wol->wolopts & WAKE_MAGIC) &&
	    tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
	    !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
		return -EINVAL;

	spin_lock_irq(&tp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_irq(&tp->lock);

	return 0;
}
6663
/* ethtool hook: return the NETIF_MSG_* bitmask controlling log verbosity. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
6669
/* ethtool hook: set the NETIF_MSG_* bitmask controlling log verbosity. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
6675
#if TG3_TSO_SUPPORT != 0
/* ethtool hook: enable/disable TCP segmentation offload.  Chips
 * without TSO capability accept only "off"; capable chips defer to
 * the generic ethtool helper.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
		return ethtool_op_set_tso(dev, value);

	return value ? -EINVAL : 0;
}
#endif
6689
/* ethtool hook: restart autonegotiation.  Only valid while the device
 * is up and the PHY currently has autoneg enabled; sets BMCR_ANRESTART
 * in the PHY control register.  Returns 0, -EAGAIN when the interface
 * is down, or -EINVAL when autoneg is off or the PHY read fails.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	spin_lock_irq(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): BMCR is read twice and the first result is
	 * discarded.  This looks like a deliberate dummy read to flush
	 * stale PHY state, but nothing here documents it -- confirm
	 * before removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    (bmcr & BMCR_ANENABLE)) {
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&tp->lock);

	return r;
}
6711
6712static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6713{
6714 struct tg3 *tp = netdev_priv(dev);
6715
6716 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6717 ering->rx_mini_max_pending = 0;
6718 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6719
6720 ering->rx_pending = tp->rx_pending;
6721 ering->rx_mini_pending = 0;
6722 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6723 ering->tx_pending = tp->tx_pending;
6724}
6725
/* ethtool set_ringparam: resize the RX/TX rings.  New sizes only take
 * effect after a chip halt + re-init, so a running interface is stopped
 * around the update and restarted afterwards.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1))
		return -EINVAL;

	if (netif_running(dev))
		tg3_netif_stop(tp);

	/* Lock order used throughout the driver: tp->lock, then tx_lock. */
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
6760
6761static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6762{
6763 struct tg3 *tp = netdev_priv(dev);
6764
6765 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6766 epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6767 epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6768}
6769
/* ethtool set_pauseparam: update flow-control configuration.  The chip
 * must be halted and re-initialized for the new settings to take
 * effect, so a running interface is stopped around the update.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	/* Lock order used throughout the driver: tp->lock, then tx_lock. */
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	if (epause->autoneg)
		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	else
		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
	if (epause->rx_pause)
		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
	else
		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;

	if (netif_running(dev)) {
		tg3_halt(tp);
		tg3_init_hw(tp);
		tg3_netif_start(tp);
	}
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
6802
6803static u32 tg3_get_rx_csum(struct net_device *dev)
6804{
6805 struct tg3 *tp = netdev_priv(dev);
6806 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6807}
6808
6809static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6810{
6811 struct tg3 *tp = netdev_priv(dev);
6812
6813 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6814 if (data != 0)
6815 return -EINVAL;
6816 return 0;
6817 }
6818
6819 spin_lock_irq(&tp->lock);
6820 if (data)
6821 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6822 else
6823 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6824 spin_unlock_irq(&tp->lock);
6825
6826 return 0;
6827}
6828
6829static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6830{
6831 struct tg3 *tp = netdev_priv(dev);
6832
6833 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6834 if (data != 0)
6835 return -EINVAL;
6836 return 0;
6837 }
6838
6839 if (data)
6840 dev->features |= NETIF_F_IP_CSUM;
6841 else
6842 dev->features &= ~NETIF_F_IP_CSUM;
6843
6844 return 0;
6845}
6846
6847static int tg3_get_stats_count (struct net_device *dev)
6848{
6849 return TG3_NUM_STATS;
6850}
6851
6852static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6853{
6854 switch (stringset) {
6855 case ETH_SS_STATS:
6856 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6857 break;
6858 default:
6859 WARN_ON(1); /* we need a WARN() */
6860 break;
6861 }
6862}
6863
6864static void tg3_get_ethtool_stats (struct net_device *dev,
6865 struct ethtool_stats *estats, u64 *tmp_stats)
6866{
6867 struct tg3 *tp = netdev_priv(dev);
6868 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6869}
6870
/* SIOCxMII ioctl handler: MDIO read/write passthrough to the PHY.
 * Serdes (fiber) devices have no MDIO-visible PHY; for them the MII
 * register ops fall through to -EOPNOTSUPP.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		spin_lock_irq(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_irq(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		/* Writing PHY registers requires privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_irq(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6916
#if TG3_VLAN_TAG_USED
/* Attach (or detach, when grp is NULL) the VLAN group and refresh the
 * RX mode so the chip's VLAN-tag stripping matches the new state.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
}
6933
/* Remove a VLAN id from the group table under the driver locks. */
static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct tg3 *tp = netdev_priv(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	if (tp->vlgrp)
		tp->vlgrp->vlan_devices[vid] = NULL;
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);
}
#endif
6946
/* ethtool operations table; TSO entries are compiled in only when the
 * kernel provides NETIF_F_TSO (see TG3_TSO_SUPPORT).
 */
static struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#if TG3_TSO_SUPPORT != 0
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
#endif
	.get_strings		= tg3_get_strings,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
};
6980
/* Determine the size of a plain serial EEPROM by probing for address
 * wrap-around; leaves the default EEPROM_CHIP_SIZE in tp->nvram_size
 * when the magic signature is absent or a read fails.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Signature is stored byte-swapped relative to register reads. */
	if (swab32(val) != TG3_EEPROM_MAGIC)
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x800;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (swab32(val) == TG3_EEPROM_MAGIC)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
7012
/* Read the NVRAM size (in KB, upper halfword of the word at offset
 * 0xf0) as recorded by the bootcode; fall back to 128KB when the field
 * is zero or unreadable.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	tp->nvram_size = 0x20000;
}
7025
/* Decode the NVRAM_CFG1 strapping register to classify the attached
 * part: record the JEDEC vendor, page size and whether the interface
 * is buffered in *tp.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	}
	else {
		/* No flash interface: clear compat bypass mode. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	/* 5750/5752 encode the flash vendor in CFG1; older chips are
	 * assumed to carry a buffered Atmel AT45DB0X1B part.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
				break;
			case FLASH_VENDOR_ATMEL_EEPROM:
				tp->nvram_jedecnum = JEDEC_ATMEL;
				tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_ST:
				tp->nvram_jedecnum = JEDEC_ST;
				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
				break;
			case FLASH_VENDOR_SAIFUN:
				tp->nvram_jedecnum = JEDEC_SAIFUN;
				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
				break;
			case FLASH_VENDOR_SST_SMALL:
			case FLASH_VENDOR_SST_LARGE:
				tp->nvram_jedecnum = JEDEC_SST;
				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
				break;
		}
	}
	else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}
7078
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* One-time NVRAM setup at probe: reset the EEPROM state machine,
 * enable auto-SEEPROM access, then classify and size the attached
 * part.  Sun 570X boards have no usable NVRAM and are skipped.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	int j;

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
		return;

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	/* XXX schedule_timeout() ... */
	for (j = 0; j < 100; j++)
		udelay(10);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

		/* 5750/5752 gate NVRAM behind an access-enable bit:
		 * open it around the probe, close it afterwards.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
			u32 nvaccess = tr32(NVRAM_ACCESS);

			tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
		}

		tg3_get_nvram_info(tp);
		tg3_get_nvram_size(tp);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
			u32 nvaccess = tr32(NVRAM_ACCESS);

			tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
		}

	} else {
		/* 5700/5701 only have the serial EEPROM interface. */
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
7128
/* Read one 32-bit word from the serial EEPROM via the GRC EEPROM
 * address/data registers.  offset must be dword aligned and within
 * the addressable range.  Returns 0 on success, -EINVAL on a bad
 * offset, -EBUSY if the EEPROM state machine never completes.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll (up to ~1s) for the completion bit. */
	for (i = 0; i < 10000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		udelay(100);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
7162
#define NVRAM_CMD_TIMEOUT 10000

/* Issue a command to the NVRAM interface and poll for NVRAM_CMD_DONE.
 * Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}
	if (i == NVRAM_CMD_TIMEOUT) {
		return -EBUSY;
	}
	return 0;
}
7182
/* Read one 32-bit word of NVRAM at byte offset 'offset' into *val.
 * Falls back to the serial-EEPROM path on chips without the NVRAM
 * interface, and translates linear offsets into the Atmel buffered
 * flash's page/offset addressing where needed.  Returns 0 or a
 * negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
		return -EINVAL;
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Atmel buffered flash uses page-number / page-offset
	 * addressing rather than a flat byte offset.
	 */
	if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
		(tp->tg3_flags2 & TG3_FLG2_FLASH) &&
		(tp->nvram_jedecnum == JEDEC_ATMEL)) {

		offset = ((offset / tp->nvram_pagesize) <<
			  ATMEL_AT45DB0X1B_PAGE_POS) +
			(offset % tp->nvram_pagesize);
	}

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	tg3_nvram_lock(tp);

	/* 5750/5752 gate NVRAM behind an access-enable bit. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
	}

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	/* Data register is byte-swapped relative to NVRAM layout. */
	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_nvram_unlock(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
		u32 nvaccess = tr32(NVRAM_ACCESS);

		tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
	}

	return ret;
}
7234
/* Write 'len' bytes from buf to the serial EEPROM starting at byte
 * 'offset', one 32-bit word at a time, polling for completion after
 * each word.  Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				   u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		val = tr32(GRC_EEPROM_ADDR);
		/* Writing the bit back clears any stale completion. */
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 10000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			udelay(100);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
7276
/* offset and length are dword aligned */
/* Write to unbuffered flash: each affected page must be read out,
 * merged with the new data, erased, and rewritten in full.  A
 * write-enable command precedes both the erase and the write, and a
 * write-disable is issued when done.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
			   u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer for the read-modify-write of one page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size, nvaccess;

		phy_addr = offset & ~pagemask;

		/* Read the whole current page into tmp. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data over the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		nvaccess = tr32(NVRAM_ACCESS);
		tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the merged page back a word at a time. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

			data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Re-assert write protection regardless of outcome. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
7373
/* offset and length are dword aligned */
/* Write to buffered flash or EEPROM one 32-bit word at a time; no page
 * erase is needed.  FIRST/LAST command flags bracket each flash page
 * (and each word for plain EEPROM); ST parts additionally need a
 * write-enable at the start of every page.  Returns 0 or a negative
 * errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
			   u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

		page_off = offset % tp->nvram_pagesize;

		/* Atmel buffered flash uses page/offset addressing. */
		if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
			(tp->nvram_jedecnum == JEDEC_ATMEL)) {

			phy_addr = ((offset / tp->nvram_pagesize) <<
				    ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
		}
		else {
			phy_addr = offset;
		}

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		else if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts require a write-enable before each page. */
		if ((tp->nvram_jedecnum == JEDEC_ST) &&
			(nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
7429
/* offset and length are dword aligned */
/* Top-level NVRAM write: lifts write protection via GPIO1 when the
 * board is write-protected, selects the EEPROM, buffered-flash or
 * unbuffered-flash path, and restores protection afterwards.
 * Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
		printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
		return -EINVAL;
	}

	/* Drop GPIO1 output to release hardware write protection. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
		       GRC_LCLCTRL_GPIO_OE1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	}
	else {
		u32 grc_mode;

		tg3_nvram_lock(tp);

		/* 5750/5752: open the NVRAM access gate and arm the
		 * write-assist register.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
			u32 nvaccess = tr32(NVRAM_ACCESS);

			tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);

			tw32(NVRAM_WRITE1, 0x406);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {

			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		}
		else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
			u32 nvaccess = tr32(NVRAM_ACCESS);

			tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
		}
		tg3_nvram_unlock(tp);
	}

	/* Restore hardware write protection. */
	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
		       GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	return ret;
}
7497
/* Fallback table mapping PCI subsystem vendor/device IDs to PHY IDs
 * for boards whose NVRAM carries no valid configuration signature.
 * A phy_id of 0 marks a serdes (fiber) board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
7540
7541static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7542{
7543 int i;
7544
7545 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7546 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7547 tp->pdev->subsystem_vendor) &&
7548 (subsys_id_to_phy_id[i].subsys_devid ==
7549 tp->pdev->subsystem_device))
7550 return &subsys_id_to_phy_id[i];
7551 }
7552 return NULL;
7553}
7554
/* Probe the PHY at device init time.
 *
 * Reads the NVRAM-derived NIC SRAM configuration (PHY id, fiber vs
 * copper, LED mode, write-protect, ASF, WoL capability), then reads
 * the physical PHY id over MDIO and reconciles the two, falling back
 * to the hardcoded subsystem-ID table when neither is usable.  For
 * copper PHYs without ASF it also resets the PHY and sets up
 * autonegotiation advertisement.  Returns 0 or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	u32 val;
	int eeprom_signature_found, eeprom_phy_serdes, err;

	tp->phy_id = PHY_ID_INVALID;
	eeprom_phy_id = PHY_ID_INVALID;
	eeprom_phy_serdes = 0;
	eeprom_signature_found = 0;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer chips with a sane
		 * bootcode version.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		eeprom_signature_found = 1;

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Reassemble the PHY id from the two SRAM halves. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		/* 5750/5752 keep the LED mode in CFG_2 with extra
		 * "Shasta" modes; older chips use CFG.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) {
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		} else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards always use PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			/* 5750/5752 use the newer ASF handshake. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
			tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
	}

	/* Reading the PHY ID register can conflict with ASF
	 * firwmare access to the PHY hardware.
	 */
	err = 0;
	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
	} else {
		if (eeprom_signature_found) {
			tp->phy_id = eeprom_phy_id;
			if (eeprom_phy_serdes)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl;

		/* NOTE(review): BMSR read twice on purpose — latched
		 * link status; second read reflects current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 must force master mode. */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		if (!tg3_copper_is_advertising_all(tp)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	if (!eeprom_signature_found)
		tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
7786
7787static void __devinit tg3_read_partno(struct tg3 *tp)
7788{
7789 unsigned char vpd_data[256];
7790 int i;
7791
7792 if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7793 /* Sun decided not to put the necessary bits in the
7794 * NVRAM of their onboard tg3 parts :(
7795 */
7796 strcpy(tp->board_part_number, "Sun 570X");
7797 return;
7798 }
7799
7800 for (i = 0; i < 256; i += 4) {
7801 u32 tmp;
7802
7803 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7804 goto out_not_found;
7805
7806 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
7807 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
7808 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7809 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7810 }
7811
7812 /* Now parse and find the part number. */
7813 for (i = 0; i < 256; ) {
7814 unsigned char val = vpd_data[i];
7815 int block_end;
7816
7817 if (val == 0x82 || val == 0x91) {
7818 i = (i + 3 +
7819 (vpd_data[i + 1] +
7820 (vpd_data[i + 2] << 8)));
7821 continue;
7822 }
7823
7824 if (val != 0x90)
7825 goto out_not_found;
7826
7827 block_end = (i + 3 +
7828 (vpd_data[i + 1] +
7829 (vpd_data[i + 2] << 8)));
7830 i += 3;
7831 while (i < block_end) {
7832 if (vpd_data[i + 0] == 'P' &&
7833 vpd_data[i + 1] == 'N') {
7834 int partno_len = vpd_data[i + 2];
7835
7836 if (partno_len > 24)
7837 goto out_not_found;
7838
7839 memcpy(tp->board_part_number,
7840 &vpd_data[i + 3],
7841 partno_len);
7842
7843 /* Success. */
7844 return;
7845 }
7846 }
7847
7848 /* Part number not found. */
7849 goto out_not_found;
7850 }
7851
7852out_not_found:
7853 strcpy(tp->board_part_number, "none");
7854}
7855
#ifdef CONFIG_SPARC64
/* Return 1 if this device is a Sun onboard 570X part, identified by
 * the OBP "subsystem-vendor-id" property of its PROM node; 0 otherwise.
 */
static int __devinit tg3_is_sun_570X(struct tg3 *tp)
{
	struct pcidev_cookie *cookie = tp->pdev->sysdata;
	u32 vendor;
	int len;

	if (cookie == NULL)
		return 0;

	len = prom_getproperty(cookie->prom_node, "subsystem-vendor-id",
			       (char *) &vendor, sizeof(vendor));
	if (len == 0 || len == -1)
		return 0;

	return (vendor == PCI_VENDOR_ID_SUN) ? 1 : 0;
}
#endif
7877
/* Discover chip "invariants": chip revision id, bus type (PCI /
 * PCI-X / PCI Express), the hardware-workaround and capability bits
 * in tp->tg3_flags / tp->tg3_flags2, coalescing mode, and the PHY
 * (via tg3_phy_probe).  Runs once at probe time.
 *
 * Returns 0 on success, or a negative errno if the chip could not be
 * put into D0.  A PHY probe failure is carried in the return value
 * but the remaining setup still runs (see comment below).
 */
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	/* Host bridges known to reorder posted writes; when one is
	 * present every mailbox write must be read back (see below).
	 */
	static struct pci_device_id write_reorder_chipsets[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801AA_8) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801AB_8) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801BA_11) },
		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
			     PCI_DEVICE_ID_INTEL_82801BA_6) },
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
		{ },
	};
	u32 misc_ctrl_reg;
	u32 cacheline_sz_reg;
	u32 pci_state_reg, grc_misc_cfg;
	u32 val;
	u16 pci_cmd;
	int err;

#ifdef CONFIG_SPARC64
	if (tg3_is_sun_570X(tp))
		tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
#endif

	/* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
	 * reordering to the mailbox registers done by the host
	 * controller can cause major troubles. We read back from
	 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
	if (pci_dev_present(write_reorder_chipsets))
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;

	/* Force memory write invalidate off. If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
	 * to match the cacheline size. The Broadcom driver have this
	 * workaround but turns MWI off all the times so never uses
	 * it. This seems to suggest that the workaround is insufficient.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	/* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
	 * has the register indirect write enable bit set before
	 * we try to access any of the MMIO registers. It is also
	 * critical that the PCI-X hw workaround situation is decided
	 * before that as well.
	 */
	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			      &misc_ctrl_reg);

	tp->pci_chip_rev_id = (misc_ctrl_reg >>
			       MISC_HOST_CTRL_CHIPREV_SHIFT);

	/* Initialize misc host control in PCI block. */
	tp->misc_host_ctrl |= (misc_ctrl_reg &
			       MISC_HOST_CTRL_CHIPREV);
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
			      &cacheline_sz_reg);

	/* Unpack the byte-wide fields of the cacheline-size register. */
	tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
	tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
	tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
	tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;

	/* 5705 and newer ASICs (5750, 5752) are flagged 5705_PLUS. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752))
		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;

	/* 5750 and 5752 implement TSO in hardware. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->tg3_flags2 |= TG3_FLG2_HW_TSO;

	if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;

	/* Enforce a minimum PCI latency timer of 64 on 5703. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	    tp->pci_lat_timer < 64) {
		tp->pci_lat_timer = 64;

		cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
		cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
		cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
		cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);

		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
				       cacheline_sz_reg);
	}

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);

	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;

		/* If this is a 5700 BX chipset, and we are in PCI-X
		 * mode, enable register write workaround.
		 *
		 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
			u32 pm_reg;
			u16 pci_cmd;

			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;

			/* The chip can have it's power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
			pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					      &pm_reg);
			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
			pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
					       pm_reg);

			/* Also, force SERR#/PERR# in PCI command. */
			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}

	/* Back to back register writes can cause problems on this chip,
	 * the workaround is to read back all reg writes except those to
	 * mailbox regs. See tg3_write_indirect_reg32().
	 *
	 * PCI Express 5750_A0 rev chips need this workaround too.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	     tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
		tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;

	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;

	/* Chip-specific fixup from Broadcom driver */
	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
	}

	/* Force the chip into D0. */
	err = tg3_set_power_state(tp, 0);
	if (err) {
		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
		       pci_name(tp->pdev));
		return err;
	}

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;

	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processers, so make the chip do the pseudo-
	 * header checksums on receive. For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
	tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
	tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;

	/* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
	if (tp->dev->mtu > ETH_DATA_LEN)
		tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;

	/* Determine WakeOnLan speed to use. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
	} else {
		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
	}

	/* A few boards don't want Ethernet@WireSpeed phy feature */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;

	/* PHY errata flags for specific chip revisions. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
		tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;

	/* Only 5701 and later support tagged irq status mode.
	 * Also, 5788 chips cannot use tagged irq status.
	 *
	 * However, since we are using NAPI avoid tagged irq status
	 * because the interrupt condition is more difficult to
	 * fully clear in that mode.
	 */
	tp->coalesce_mode = 0;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;

	/* Initialize MAC MI mode, polling disabled. */
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	/* Initialize data/descriptor byte/word swapping. */
	val = tr32(GRC_MODE);
	val &= GRC_MODE_HOST_STACKUP;
	tw32(GRC_MODE, val | tp->grc_mode);

	tg3_switch_clocks(tp);

	/* Clear this out for sanity. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);

		if (chiprevid == CHIPREV_ID_5701_A0 ||
		    chiprevid == CHIPREV_ID_5701_B0 ||
		    chiprevid == CHIPREV_ID_5701_B2 ||
		    chiprevid == CHIPREV_ID_5701_B5) {
			void __iomem *sram_base;

			/* Write some dummy words into the SRAM status block
			 * area, see if it reads back correctly. If the return
			 * value is bad, force enable the PCIX workaround.
			 */
			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;

			writel(0x00000000, sram_base);
			writel(0x00000000, sram_base + 4);
			writel(0xffffffff, sram_base + 4);
			if (readl(sram_base) != 0x00000000)
				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
		}
	}

	udelay(50);
	tg3_nvram_init(tp);

	grc_misc_cfg = tr32(GRC_MISC_CFG);
	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;

	/* Broadcom's driver says that CIOBE multisplit has a bug */
#if 0
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
	    grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
		tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
		tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
	}
#endif
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
		tp->tg3_flags2 |= TG3_FLG2_IS_5788;

	/* these are limited to 10/100 only */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;

	err = tg3_phy_probe(tp);
	if (err) {
		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
		       pci_name(tp->pdev), err);
		/* ... but do not return immediately ... */
	}

	tg3_read_partno(tp);

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
		else
			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
	}

	/* 5700 {AX,BX} chips have a broken status block link
	 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
	else
		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;

	/* The led_ctrl is set during tg3_phy_probe, here we might
	 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
				  TG3_FLAG_USE_LINKCHG_REG);
	}

	/* For all SERDES we poll the MAC status register. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
	else
		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;

	/* 5700 BX chips need to have their TX producer index mailboxes
	 * written twice to workaround a bug.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
	else
		tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;

	/* It seems all chips can get confused if TX buffers
	 * straddle the 4GB address boundary in some cases.
	 */
	tp->dev->hard_start_xmit = tg3_start_xmit;

	/* NOTE(review): the 2-byte RX offset presumably keeps the IP
	 * header 4-byte aligned; 5701 in PCI-X mode cannot use it --
	 * confirm against the chip errata.
	 */
	tp->rx_offset = 2;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
		tp->rx_offset = 0;

	/* By default, disable wake-on-lan. User can change this
	 * using ETHTOOL_SWOL.
	 */
	tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;

	return err;
}
8245
8246#ifdef CONFIG_SPARC64
8247static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8248{
8249 struct net_device *dev = tp->dev;
8250 struct pci_dev *pdev = tp->pdev;
8251 struct pcidev_cookie *pcp = pdev->sysdata;
8252
8253 if (pcp != NULL) {
8254 int node = pcp->prom_node;
8255
8256 if (prom_getproplen(node, "local-mac-address") == 6) {
8257 prom_getproperty(node, "local-mac-address",
8258 dev->dev_addr, 6);
8259 return 0;
8260 }
8261 }
8262 return -ENODEV;
8263}
8264
8265static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8266{
8267 struct net_device *dev = tp->dev;
8268
8269 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8270 return 0;
8271}
8272#endif
8273
8274static int __devinit tg3_get_device_address(struct tg3 *tp)
8275{
8276 struct net_device *dev = tp->dev;
8277 u32 hi, lo, mac_offset;
8278
8279#ifdef CONFIG_SPARC64
8280 if (!tg3_get_macaddr_sparc(tp))
8281 return 0;
8282#endif
8283
8284 mac_offset = 0x7c;
8285 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8286 !(tp->tg3_flags & TG3_FLG2_SUN_570X)) {
8287 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8288 mac_offset = 0xcc;
8289 if (tg3_nvram_lock(tp))
8290 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8291 else
8292 tg3_nvram_unlock(tp);
8293 }
8294
8295 /* First try to get it from MAC address mailbox. */
8296 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8297 if ((hi >> 16) == 0x484b) {
8298 dev->dev_addr[0] = (hi >> 8) & 0xff;
8299 dev->dev_addr[1] = (hi >> 0) & 0xff;
8300
8301 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8302 dev->dev_addr[2] = (lo >> 24) & 0xff;
8303 dev->dev_addr[3] = (lo >> 16) & 0xff;
8304 dev->dev_addr[4] = (lo >> 8) & 0xff;
8305 dev->dev_addr[5] = (lo >> 0) & 0xff;
8306 }
8307 /* Next, try NVRAM. */
8308 else if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
8309 !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8310 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8311 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8312 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8313 dev->dev_addr[2] = ((lo >> 0) & 0xff);
8314 dev->dev_addr[3] = ((lo >> 8) & 0xff);
8315 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8316 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8317 }
8318 /* Finally just fetch it out of the MAC control regs. */
8319 else {
8320 hi = tr32(MAC_ADDR_0_HIGH);
8321 lo = tr32(MAC_ADDR_0_LOW);
8322
8323 dev->dev_addr[5] = lo & 0xff;
8324 dev->dev_addr[4] = (lo >> 8) & 0xff;
8325 dev->dev_addr[3] = (lo >> 16) & 0xff;
8326 dev->dev_addr[2] = (lo >> 24) & 0xff;
8327 dev->dev_addr[1] = hi & 0xff;
8328 dev->dev_addr[0] = (hi >> 8) & 0xff;
8329 }
8330
8331 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8332#ifdef CONFIG_SPARC64
8333 if (!tg3_get_default_macaddr_sparc(tp))
8334 return 0;
8335#endif
8336 return -EINVAL;
8337 }
8338 return 0;
8339}
8340
/* Run one test DMA through the chip's internal DMA engines.
 *
 * Builds a single internal buffer descriptor in NIC SRAM that pairs
 * the host buffer at buf/buf_dma with NIC SRAM offset 0x2100, then
 * queues it on the read-DMA FTQ ("to_device" != 0, host -> chip) or
 * the write-DMA FTQ (chip -> host).
 *
 * Returns 0 once the descriptor appears on the matching completion
 * FIFO, or -ENODEV if it has not completed after 40 polls of 100us.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce completion FIFOs, DMA status, buffer manager and FTQs. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Describe the host-side buffer and the NIC SRAM target. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Completion queue 13, submission queue 2: read DMA. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Completion queue 16, submission queue 7: write DMA. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM via the
	 * PCI memory window in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO for our descriptor address. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
8421
#define TEST_BUFFER_SIZE 0x400

/* Calibrate TG3PCI_DMA_RW_CTRL for this chip/bus combination and, on
 * 5700/5701 only, verify DMA correctness with a loopback test through
 * NIC SRAM, tightening the write boundary if corruption is detected.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Baseline PCI read/write command watermarks. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

#ifndef CONFIG_X86
	{
		/* On non-x86, pick a DMA write boundary based on the
		 * PCI cache line size register (0 means 1024 bytes).
		 */
		u8 byte;
		int cacheline_size;
		pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);

		if (byte == 0)
			cacheline_size = 1024;
		else
			cacheline_size = (int) byte * 4;

		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
				tp->dma_rwctrl |=
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
				break;
			} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
				tp->dma_rwctrl &=
					~(DMA_RWCTRL_PCI_WRITE_CMD);
				tp->dma_rwctrl |=
					DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
				break;
			}
			/* fallthrough */
		case 256:
			if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
			    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
				tp->dma_rwctrl |=
					DMA_RWCTRL_WRITE_BNDRY_256;
			else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
				tp->dma_rwctrl |=
					DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
		};
	}
#endif

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		/* Conventional PCI: 5705-class chips take a different
		 * watermark value than the older parts.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);

			if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			/* Set bit 23 to renable PCIX hw bug fix */
			tp->dma_rwctrl |= 0x009f0000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (boundary bits). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on. This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning. In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants. */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700 and 5701 go through the loopback verification. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			/* First corruption with boundary disabled:
			 * retry once with a 16-byte write boundary.
			 */
			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
			    DMA_RWCTRL_WRITE_BNDRY_DISAB) {
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
8601
8602static void __devinit tg3_init_link_config(struct tg3 *tp)
8603{
8604 tp->link_config.advertising =
8605 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8606 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8607 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8608 ADVERTISED_Autoneg | ADVERTISED_MII);
8609 tp->link_config.speed = SPEED_INVALID;
8610 tp->link_config.duplex = DUPLEX_INVALID;
8611 tp->link_config.autoneg = AUTONEG_ENABLE;
8612 netif_carrier_off(tp->dev);
8613 tp->link_config.active_speed = SPEED_INVALID;
8614 tp->link_config.active_duplex = DUPLEX_INVALID;
8615 tp->link_config.phy_is_low_power = 0;
8616 tp->link_config.orig_speed = SPEED_INVALID;
8617 tp->link_config.orig_duplex = DUPLEX_INVALID;
8618 tp->link_config.orig_autoneg = AUTONEG_INVALID;
8619}
8620
8621static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8622{
8623 tp->bufmgr_config.mbuf_read_dma_low_water =
8624 DEFAULT_MB_RDMA_LOW_WATER;
8625 tp->bufmgr_config.mbuf_mac_rx_low_water =
8626 DEFAULT_MB_MACRX_LOW_WATER;
8627 tp->bufmgr_config.mbuf_high_water =
8628 DEFAULT_MB_HIGH_WATER;
8629
8630 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8631 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8632 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8633 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8634 tp->bufmgr_config.mbuf_high_water_jumbo =
8635 DEFAULT_MB_HIGH_WATER_JUMBO;
8636
8637 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8638 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8639}
8640
8641static char * __devinit tg3_phy_string(struct tg3 *tp)
8642{
8643 switch (tp->phy_id & PHY_ID_MASK) {
8644 case PHY_ID_BCM5400: return "5400";
8645 case PHY_ID_BCM5401: return "5401";
8646 case PHY_ID_BCM5411: return "5411";
8647 case PHY_ID_BCM5701: return "5701";
8648 case PHY_ID_BCM5703: return "5703";
8649 case PHY_ID_BCM5704: return "5704";
8650 case PHY_ID_BCM5705: return "5705";
8651 case PHY_ID_BCM5750: return "5750";
8652 case PHY_ID_BCM8002: return "8002/serdes";
8653 case 0: return "serdes";
8654 default: return "unknown";
8655 };
8656}
8657
8658static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8659{
8660 struct pci_dev *peer;
8661 unsigned int func, devnr = tp->pdev->devfn & ~7;
8662
8663 for (func = 0; func < 8; func++) {
8664 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8665 if (peer && peer != tp->pdev)
8666 break;
8667 pci_dev_put(peer);
8668 }
8669 if (!peer || peer == tp->pdev)
8670 BUG();
8671
8672 /*
8673 * We don't need to keep the refcount elevated; there's no way
8674 * to remove one half of this device without removing the other
8675 */
8676 pci_dev_put(peer);
8677
8678 return peer;
8679}
8680
8681static int __devinit tg3_init_one(struct pci_dev *pdev,
8682 const struct pci_device_id *ent)
8683{
8684 static int tg3_version_printed = 0;
8685 unsigned long tg3reg_base, tg3reg_len;
8686 struct net_device *dev;
8687 struct tg3 *tp;
8688 int i, err, pci_using_dac, pm_cap;
8689
8690 if (tg3_version_printed++ == 0)
8691 printk(KERN_INFO "%s", version);
8692
8693 err = pci_enable_device(pdev);
8694 if (err) {
8695 printk(KERN_ERR PFX "Cannot enable PCI device, "
8696 "aborting.\n");
8697 return err;
8698 }
8699
8700 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8701 printk(KERN_ERR PFX "Cannot find proper PCI device "
8702 "base address, aborting.\n");
8703 err = -ENODEV;
8704 goto err_out_disable_pdev;
8705 }
8706
8707 err = pci_request_regions(pdev, DRV_MODULE_NAME);
8708 if (err) {
8709 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8710 "aborting.\n");
8711 goto err_out_disable_pdev;
8712 }
8713
8714 pci_set_master(pdev);
8715
8716 /* Find power-management capability. */
8717 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8718 if (pm_cap == 0) {
8719 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8720 "aborting.\n");
8721 err = -EIO;
8722 goto err_out_free_res;
8723 }
8724
8725 /* Configure DMA attributes. */
8726 err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8727 if (!err) {
8728 pci_using_dac = 1;
8729 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8730 if (err < 0) {
8731 printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8732 "for consistent allocations\n");
8733 goto err_out_free_res;
8734 }
8735 } else {
8736 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8737 if (err) {
8738 printk(KERN_ERR PFX "No usable DMA configuration, "
8739 "aborting.\n");
8740 goto err_out_free_res;
8741 }
8742 pci_using_dac = 0;
8743 }
8744
8745 tg3reg_base = pci_resource_start(pdev, 0);
8746 tg3reg_len = pci_resource_len(pdev, 0);
8747
8748 dev = alloc_etherdev(sizeof(*tp));
8749 if (!dev) {
8750 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8751 err = -ENOMEM;
8752 goto err_out_free_res;
8753 }
8754
8755 SET_MODULE_OWNER(dev);
8756 SET_NETDEV_DEV(dev, &pdev->dev);
8757
8758 if (pci_using_dac)
8759 dev->features |= NETIF_F_HIGHDMA;
8760 dev->features |= NETIF_F_LLTX;
8761#if TG3_VLAN_TAG_USED
8762 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8763 dev->vlan_rx_register = tg3_vlan_rx_register;
8764 dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8765#endif
8766
8767 tp = netdev_priv(dev);
8768 tp->pdev = pdev;
8769 tp->dev = dev;
8770 tp->pm_cap = pm_cap;
8771 tp->mac_mode = TG3_DEF_MAC_MODE;
8772 tp->rx_mode = TG3_DEF_RX_MODE;
8773 tp->tx_mode = TG3_DEF_TX_MODE;
8774 tp->mi_mode = MAC_MI_MODE_BASE;
8775 if (tg3_debug > 0)
8776 tp->msg_enable = tg3_debug;
8777 else
8778 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8779
8780 /* The word/byte swap controls here control register access byte
8781 * swapping. DMA data byte swapping is controlled in the GRC_MODE
8782 * setting below.
8783 */
8784 tp->misc_host_ctrl =
8785 MISC_HOST_CTRL_MASK_PCI_INT |
8786 MISC_HOST_CTRL_WORD_SWAP |
8787 MISC_HOST_CTRL_INDIR_ACCESS |
8788 MISC_HOST_CTRL_PCISTATE_RW;
8789
8790 /* The NONFRM (non-frame) byte/word swap controls take effect
8791 * on descriptor entries, anything which isn't packet data.
8792 *
8793 * The StrongARM chips on the board (one for tx, one for rx)
8794 * are running in big-endian mode.
8795 */
8796 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8797 GRC_MODE_WSWAP_NONFRM_DATA);
8798#ifdef __BIG_ENDIAN
8799 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8800#endif
8801 spin_lock_init(&tp->lock);
8802 spin_lock_init(&tp->tx_lock);
8803 spin_lock_init(&tp->indirect_lock);
8804 INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8805
8806 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8807 if (tp->regs == 0UL) {
8808 printk(KERN_ERR PFX "Cannot map device registers, "
8809 "aborting.\n");
8810 err = -ENOMEM;
8811 goto err_out_free_dev;
8812 }
8813
8814 tg3_init_link_config(tp);
8815
8816 tg3_init_bufmgr_config(tp);
8817
8818 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8819 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8820 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8821
8822 dev->open = tg3_open;
8823 dev->stop = tg3_close;
8824 dev->get_stats = tg3_get_stats;
8825 dev->set_multicast_list = tg3_set_rx_mode;
8826 dev->set_mac_address = tg3_set_mac_addr;
8827 dev->do_ioctl = tg3_ioctl;
8828 dev->tx_timeout = tg3_tx_timeout;
8829 dev->poll = tg3_poll;
8830 dev->ethtool_ops = &tg3_ethtool_ops;
8831 dev->weight = 64;
8832 dev->watchdog_timeo = TG3_TX_TIMEOUT;
8833 dev->change_mtu = tg3_change_mtu;
8834 dev->irq = pdev->irq;
8835#ifdef CONFIG_NET_POLL_CONTROLLER
8836 dev->poll_controller = tg3_poll_controller;
8837#endif
8838
8839 err = tg3_get_invariants(tp);
8840 if (err) {
8841 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8842 "aborting.\n");
8843 goto err_out_iounmap;
8844 }
8845
8846 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8847 tp->bufmgr_config.mbuf_read_dma_low_water =
8848 DEFAULT_MB_RDMA_LOW_WATER_5705;
8849 tp->bufmgr_config.mbuf_mac_rx_low_water =
8850 DEFAULT_MB_MACRX_LOW_WATER_5705;
8851 tp->bufmgr_config.mbuf_high_water =
8852 DEFAULT_MB_HIGH_WATER_5705;
8853 }
8854
8855#if TG3_TSO_SUPPORT != 0
8856 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8857 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8858 }
8859 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8860 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8861 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8862 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8863 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8864 } else {
8865 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8866 }
8867
8868 /* TSO is off by default, user can enable using ethtool. */
8869#if 0
8870 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8871 dev->features |= NETIF_F_TSO;
8872#endif
8873
8874#endif
8875
8876 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8877 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8878 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8879 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8880 tp->rx_pending = 63;
8881 }
8882
8883 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8884 tp->pdev_peer = tg3_find_5704_peer(tp);
8885
8886 err = tg3_get_device_address(tp);
8887 if (err) {
8888 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8889 "aborting.\n");
8890 goto err_out_iounmap;
8891 }
8892
8893 /*
8894 * Reset chip in case UNDI or EFI driver did not shutdown
8895 * DMA self test will enable WDMAC and we'll see (spurious)
8896 * pending DMA on the PCI bus at that point.
8897 */
8898 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8899 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8900 pci_save_state(tp->pdev);
8901 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8902 tg3_halt(tp);
8903 }
8904
8905 err = tg3_test_dma(tp);
8906 if (err) {
8907 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8908 goto err_out_iounmap;
8909 }
8910
8911 /* Tigon3 can do ipv4 only... and some chips have buggy
8912 * checksumming.
8913 */
8914 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8915 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8916 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8917 } else
8918 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8919
8920 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8921 dev->features &= ~NETIF_F_HIGHDMA;
8922
8923 /* flow control autonegotiation is default behavior */
8924 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8925
8926 err = register_netdev(dev);
8927 if (err) {
8928 printk(KERN_ERR PFX "Cannot register net device, "
8929 "aborting.\n");
8930 goto err_out_iounmap;
8931 }
8932
8933 pci_set_drvdata(pdev, dev);
8934
8935 /* Now that we have fully setup the chip, save away a snapshot
8936 * of the PCI config space. We need to restore this after
8937 * GRC_MISC_CFG core clock resets and some resume events.
8938 */
8939 pci_save_state(tp->pdev);
8940
8941 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8942 dev->name,
8943 tp->board_part_number,
8944 tp->pci_chip_rev_id,
8945 tg3_phy_string(tp),
8946 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8947 ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8948 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8949 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8950 ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8951 (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8952
8953 for (i = 0; i < 6; i++)
8954 printk("%2.2x%c", dev->dev_addr[i],
8955 i == 5 ? '\n' : ':');
8956
8957 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8958 "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8959 "TSOcap[%d] \n",
8960 dev->name,
8961 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8962 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8963 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8964 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8965 (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8966 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8967 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8968
8969 return 0;
8970
8971err_out_iounmap:
8972 iounmap(tp->regs);
8973
8974err_out_free_dev:
8975 free_netdev(dev);
8976
8977err_out_free_res:
8978 pci_release_regions(pdev);
8979
8980err_out_disable_pdev:
8981 pci_disable_device(pdev);
8982 pci_set_drvdata(pdev, NULL);
8983 return err;
8984}
8985
8986static void __devexit tg3_remove_one(struct pci_dev *pdev)
8987{
8988 struct net_device *dev = pci_get_drvdata(pdev);
8989
8990 if (dev) {
8991 struct tg3 *tp = netdev_priv(dev);
8992
8993 unregister_netdev(dev);
8994 iounmap(tp->regs);
8995 free_netdev(dev);
8996 pci_release_regions(pdev);
8997 pci_disable_device(pdev);
8998 pci_set_drvdata(pdev, NULL);
8999 }
9000}
9001
/* PCI power-management suspend hook.  Quiesce the interface (stop the
 * rx/tx path and the driver timer), mask interrupts, detach the net
 * device, halt the chip, and finally drop it into the low-power state
 * chosen for @state.  If the power transition fails, bring the device
 * all the way back up so the interface keeps working.
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was never brought up: nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	tg3_netif_stop(tp);

	/* Timer must be dead before we halt the chip; sync against a
	 * handler that may be running on another CPU.
	 */
	del_timer_sync(&tp->timer);

	/* Lock order used throughout this file: tp->lock, then
	 * tp->tx_lock.
	 */
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	tg3_disable_ints(tp);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	netif_device_detach(dev);

	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);
	tg3_halt(tp);
	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
	if (err) {
		/* Power transition failed: re-init the hardware, restart
		 * the timer and undo the detach so the interface stays
		 * usable.  The error is still reported to the PM core.
		 */
		spin_lock_irq(&tp->lock);
		spin_lock(&tp->tx_lock);

		tg3_init_hw(tp);

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

		spin_unlock(&tp->tx_lock);
		spin_unlock_irq(&tp->lock);
	}

	return err;
}
9048
/* PCI power-management resume hook: mirror image of tg3_suspend().
 * Restore PCI config space and full power, then re-initialize the
 * hardware, restart the driver timer, re-enable interrupts and
 * restart the interface.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface is down: tg3_suspend() did nothing, so neither do we. */
	if (!netif_running(dev))
		return 0;

	/* Config space was saved by pci_save_state() at probe time. */
	pci_restore_state(tp->pdev);

	err = tg3_set_power_state(tp, 0);	/* 0 == D0, full power */
	if (err)
		return err;

	netif_device_attach(dev);

	/* Same lock order as elsewhere: tp->lock, then tp->tx_lock. */
	spin_lock_irq(&tp->lock);
	spin_lock(&tp->tx_lock);

	tg3_init_hw(tp);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_enable_ints(tp);

	tg3_netif_start(tp);

	spin_unlock(&tp->tx_lock);
	spin_unlock_irq(&tp->lock);

	return 0;
}
9083
/* PCI driver glue: matches the device IDs in tg3_pci_tbl and wires up
 * probe, hot-remove and power-management entry points.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
9092
/* Module load entry point: register the PCI driver so the core can
 * probe all matching Tigon3 devices.
 */
static int __init tg3_init(void)
{
	return pci_module_init(&tg3_driver);
}
9097
/* Module unload entry point: unregister the driver, which triggers
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
9102
/* Standard module init/exit hookup. */
module_init(tg3_init);
module_exit(tg3_cleanup);
This page took 0.37638 seconds and 5 git commands to generate.