[TG3]: Add 1000T & 1000X flowctrl resolvers
drivers/net/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2007 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#define TG3_TSO_SUPPORT	1

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"3.86"
#define DRV_MODULE_RELDATE	"November 9, 2007"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place these ring-size constants into the tp struct itself;
 * we really want to expose them to GCC so that modulo et al.
 * operations are done with shifts and masks instead of with hw
 * multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
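/* Example: because TG3_TX_RING_SIZE is a power of two, the mask above
 * is an exact modulo -- NEXT_TX(510) is 511 and NEXT_TX(511) wraps to
 * 0 -- without the compiler having to emit a divide.
 */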

#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tp)	((tp)->tx_pending / 4)
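/* With the default tx_pending of TG3_DEF_TX_RING_PENDING (511), the
 * stopped queue is only woken once about 127 descriptors are free
 * again, giving the wake/stop logic some hysteresis.
 */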

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test (online) " },
	{ "link test (online) " },
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

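/* A note on the "flush" variant below: PCI memory writes are posted
 * and can linger in host/bridge write buffers. A read from the same
 * device cannot complete until all posted writes ahead of it have
 * landed, so reading the register straight back forces the write out
 * to the chip before we continue.
 */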
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg,val)		tp->write32(tp, reg, val)
#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
#define tr32(reg)		tp->read32(tp, reg)
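
/* tp->write32, tp->read32 and the mailbox variants used by the macros
 * above are function pointers; the driver points them at posted or
 * non-posted accessors elsewhere at probe time, depending on which
 * hardware quirks (PCI-X target bug, ICH workaround, etc.) apply to
 * the chip at hand.
 */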

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
				APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	int off;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	off = 4 * locknum;
	tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}

static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}

static inline unsigned int tg3_has_work(struct tg3 *tp)
{
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_restart_ints
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

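/* Each MDIO transaction below polls MI_COM_BUSY every 10 usec, so
 * PHY_BUSY_LOOPS bounds a single PHY register access at roughly
 * 5000 * 10 usec = 50 msec before we give up with -EBUSY.
 */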
#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     ephy | MII_TG3_EPHY_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
				if (enable)
					phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
				tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
			}
			tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

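/* BMCR_RESET is self-clearing: per IEEE 802.3 clause 22 the PHY clears
 * the bit on its own once the internal reset completes, which is what
 * the poll loop below waits for (up to 5000 * 10 usec).
 */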
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, 0x16, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}

static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt. */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode. */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB. */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, 0x16, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

static void tg3_link_report(struct tg3 *);

/* Reset the tigon3 PHY and reapply the chip-specific workarounds
 * (DSP fixups, AUX control, automdix and wirespeed) afterwards.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		u32 val;

		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}

		/* Disable GPHY autopowerdown. */
		tg3_writephy(tp, MII_TG3_MISC_SHDW,
			     MII_TG3_MISC_SHDW_WREN |
			     MII_TG3_MISC_SHDW_APD_SEL |
			     MII_TG3_MISC_SHDW_APD_WKTM_84MS);
	}

out:
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that
	 * support jumbo frames.
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				   NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					  GRC_LCLCTRL_GPIO_OE1 |
					  GRC_LCLCTRL_GPIO_OE2 |
					  GRC_LCLCTRL_GPIO_OUTPUT1 |
					  GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}

static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
		return 1;
	else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
		if (speed != SPEED_10)
			return 1;
	} else if (speed == SPEED_10)
		return 1;

	return 0;
}

static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);
static int tg3_nvram_lock(struct tg3 *);
static void tg3_nvram_unlock(struct tg3 *);

static void tg3_power_down_phy(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;

	if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}

static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	}

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = (tp->tg3_flags &
					     TG3_FLAG_WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		if (netif_msg_link(tp))
			printk(KERN_INFO PFX "%s: Link is down.\n",
			       tp->dev->name);
	} else if (netif_msg_link(tp)) {
		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
		       tp->dev->name,
		       (tp->link_config.active_speed == SPEED_1000 ?
			1000 :
			(tp->link_config.active_speed == SPEED_100 ?
			 100 : 10)),
		       (tp->link_config.active_duplex == DUPLEX_FULL ?
			"full" : "half"));

		printk(KERN_INFO PFX
		       "%s: Flow control is %s for TX and %s for RX.\n",
		       tp->dev->name,
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
		       "on" : "off",
		       (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
		       "on" : "off");
	}
}

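/* Both resolvers below implement the standard IEEE 802.3 pause
 * resolution (Annex 28B); 1000T works on the copper MII advertisement
 * bits and 1000X on the 1000BASE-X code-word equivalents:
 *
 *	local PAUSE  local ASYM  remote PAUSE  remote ASYM	result
 *	     1           x            1             x		TX + RX
 *	     0           1            1             1		TX only
 *	     1           1            0             1		RX only
 *	anything else						none
 */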
1615 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1616 {
1617 u8 cap = 0;
1618
1619 if (lcladv & ADVERTISE_PAUSE_CAP) {
1620 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1621 if (rmtadv & LPA_PAUSE_CAP)
1622 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1623 else if (rmtadv & LPA_PAUSE_ASYM)
1624 cap = TG3_FLOW_CTRL_RX;
1625 } else {
1626 if (rmtadv & LPA_PAUSE_CAP)
1627 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1628 }
1629 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1630 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1631 cap = TG3_FLOW_CTRL_TX;
1632 }
1633
1634 return cap;
1635 }
1636
1637 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1638 {
1639 u8 cap = 0;
1640
1641 if (lcladv & ADVERTISE_1000XPAUSE) {
1642 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1643 if (rmtadv & LPA_1000XPAUSE)
1644 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1645 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1646 cap = TG3_FLOW_CTRL_RX;
1647 } else {
1648 if (rmtadv & LPA_1000XPAUSE)
1649 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1650 }
1651 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1652 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1653 cap = TG3_FLOW_CTRL_TX;
1654 }
1655
1656 return cap;
1657 }
1658
1659 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1660 {
1661 u8 new_tg3_flags = 0;
1662 u32 old_rx_mode = tp->rx_mode;
1663 u32 old_tx_mode = tp->tx_mode;
1664
1665 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1666 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
1667 new_tg3_flags = tg3_resolve_flowctrl_1000X(local_adv,
1668 remote_adv);
1669 else
1670 new_tg3_flags = tg3_resolve_flowctrl_1000T(local_adv,
1671 remote_adv);
1672 } else {
1673 new_tg3_flags = tp->link_config.flowctrl;
1674 }
1675
1676 tp->link_config.active_flowctrl = new_tg3_flags;
1677
1678 if (new_tg3_flags & TG3_FLOW_CTRL_RX)
1679 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1680 else
1681 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1682
1683 if (old_rx_mode != tp->rx_mode) {
1684 tw32_f(MAC_RX_MODE, tp->rx_mode);
1685 }
1686
1687 if (new_tg3_flags & TG3_FLOW_CTRL_TX)
1688 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1689 else
1690 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1691
1692 if (old_tx_mode != tp->tx_mode) {
1693 tw32_f(MAC_TX_MODE, tp->tx_mode);
1694 }
1695 }
1696
1697 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1698 {
1699 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1700 case MII_TG3_AUX_STAT_10HALF:
1701 *speed = SPEED_10;
1702 *duplex = DUPLEX_HALF;
1703 break;
1704
1705 case MII_TG3_AUX_STAT_10FULL:
1706 *speed = SPEED_10;
1707 *duplex = DUPLEX_FULL;
1708 break;
1709
1710 case MII_TG3_AUX_STAT_100HALF:
1711 *speed = SPEED_100;
1712 *duplex = DUPLEX_HALF;
1713 break;
1714
1715 case MII_TG3_AUX_STAT_100FULL:
1716 *speed = SPEED_100;
1717 *duplex = DUPLEX_FULL;
1718 break;
1719
1720 case MII_TG3_AUX_STAT_1000HALF:
1721 *speed = SPEED_1000;
1722 *duplex = DUPLEX_HALF;
1723 break;
1724
1725 case MII_TG3_AUX_STAT_1000FULL:
1726 *speed = SPEED_1000;
1727 *duplex = DUPLEX_FULL;
1728 break;
1729
1730 default:
1731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1732 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1733 SPEED_10;
1734 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1735 DUPLEX_HALF;
1736 break;
1737 }
1738 *speed = SPEED_INVALID;
1739 *duplex = DUPLEX_INVALID;
1740 break;
1741 };
1742 }
1743
1744 static void tg3_phy_copper_begin(struct tg3 *tp)
1745 {
1746 u32 new_adv;
1747 int i;
1748
1749 if (tp->link_config.phy_is_low_power) {
1750 /* Entering low power mode. Disable gigabit and
1751 * 100baseT advertisements.
1752 */
1753 tg3_writephy(tp, MII_TG3_CTRL, 0);
1754
1755 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1756 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1757 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1758 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1759
1760 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1761 } else if (tp->link_config.speed == SPEED_INVALID) {
1762 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1763 tp->link_config.advertising &=
1764 ~(ADVERTISED_1000baseT_Half |
1765 ADVERTISED_1000baseT_Full);
1766
1767 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1768 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1769 new_adv |= ADVERTISE_10HALF;
1770 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1771 new_adv |= ADVERTISE_10FULL;
1772 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1773 new_adv |= ADVERTISE_100HALF;
1774 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1775 new_adv |= ADVERTISE_100FULL;
1776 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1777
1778 if (tp->link_config.advertising &
1779 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1780 new_adv = 0;
1781 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1782 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1783 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1784 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1785 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1786 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1787 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1788 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1789 MII_TG3_CTRL_ENABLE_AS_MASTER);
1790 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1791 } else {
1792 tg3_writephy(tp, MII_TG3_CTRL, 0);
1793 }
1794 } else {
1795 /* Asking for a specific link mode. */
1796 if (tp->link_config.speed == SPEED_1000) {
1797 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1798 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1799
1800 if (tp->link_config.duplex == DUPLEX_FULL)
1801 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1802 else
1803 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1804 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1805 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1806 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1807 MII_TG3_CTRL_ENABLE_AS_MASTER);
1808 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1809 } else {
1810 tg3_writephy(tp, MII_TG3_CTRL, 0);
1811
1812 new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1813 if (tp->link_config.speed == SPEED_100) {
1814 if (tp->link_config.duplex == DUPLEX_FULL)
1815 new_adv |= ADVERTISE_100FULL;
1816 else
1817 new_adv |= ADVERTISE_100HALF;
1818 } else {
1819 if (tp->link_config.duplex == DUPLEX_FULL)
1820 new_adv |= ADVERTISE_10FULL;
1821 else
1822 new_adv |= ADVERTISE_10HALF;
1823 }
1824 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1825 }
1826 }
1827
1828 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1829 tp->link_config.speed != SPEED_INVALID) {
1830 u32 bmcr, orig_bmcr;
1831
1832 tp->link_config.active_speed = tp->link_config.speed;
1833 tp->link_config.active_duplex = tp->link_config.duplex;
1834
1835 bmcr = 0;
1836 switch (tp->link_config.speed) {
1837 default:
1838 case SPEED_10:
1839 break;
1840
1841 case SPEED_100:
1842 bmcr |= BMCR_SPEED100;
1843 break;
1844
1845 case SPEED_1000:
1846 bmcr |= TG3_BMCR_SPEED1000;
1847 break;
1848 };
1849
1850 if (tp->link_config.duplex == DUPLEX_FULL)
1851 bmcr |= BMCR_FULLDPLX;
1852
1853 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1854 (bmcr != orig_bmcr)) {
1855 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1856 for (i = 0; i < 1500; i++) {
1857 u32 tmp;
1858
1859 udelay(10);
1860 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1861 tg3_readphy(tp, MII_BMSR, &tmp))
1862 continue;
1863 if (!(tmp & BMSR_LSTATUS)) {
1864 udelay(40);
1865 break;
1866 }
1867 }
1868 tg3_writephy(tp, MII_BMCR, bmcr);
1869 udelay(40);
1870 }
1871 } else {
1872 tg3_writephy(tp, MII_BMCR,
1873 BMCR_ANENABLE | BMCR_ANRESTART);
1874 }
1875 }
1876
1877 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1878 {
1879 int err;
1880
1881 	/* Turn off tap power management and
1882 	 * set the extended packet length bit. */
1883 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1884
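	/* Each DSP_ADDRESS/DSP_RW_PORT pair below selects an internal
	 * DSP register and then writes it through the RW port; the
	 * values themselves are opaque tuning constants from Broadcom.
	 */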
1885 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1886 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1887
1888 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1889 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1890
1891 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1892 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1893
1894 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1895 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1896
1897 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1898 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1899
1900 udelay(40);
1901
1902 return err;
1903 }
1904
1905 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1906 {
1907 u32 adv_reg, all_mask = 0;
1908
1909 if (mask & ADVERTISED_10baseT_Half)
1910 all_mask |= ADVERTISE_10HALF;
1911 if (mask & ADVERTISED_10baseT_Full)
1912 all_mask |= ADVERTISE_10FULL;
1913 if (mask & ADVERTISED_100baseT_Half)
1914 all_mask |= ADVERTISE_100HALF;
1915 if (mask & ADVERTISED_100baseT_Full)
1916 all_mask |= ADVERTISE_100FULL;
1917
1918 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1919 return 0;
1920
1921 if ((adv_reg & all_mask) != all_mask)
1922 return 0;
1923 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1924 u32 tg3_ctrl;
1925
1926 all_mask = 0;
1927 if (mask & ADVERTISED_1000baseT_Half)
1928 all_mask |= ADVERTISE_1000HALF;
1929 if (mask & ADVERTISED_1000baseT_Full)
1930 all_mask |= ADVERTISE_1000FULL;
1931
1932 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1933 return 0;
1934
1935 if ((tg3_ctrl & all_mask) != all_mask)
1936 return 0;
1937 }
1938 return 1;
1939 }
1940
1941 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1942 {
1943 int current_link_up;
1944 u32 bmsr, dummy;
1945 u16 current_speed;
1946 u8 current_duplex;
1947 int i, err;
1948
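	/* Note: the tw32_f() variants read the register back after the
	 * write to flush the posted PCI write before the udelay() that
	 * lets the hardware settle.
	 */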
1949 tw32(MAC_EVENT, 0);
1950
1951 tw32_f(MAC_STATUS,
1952 (MAC_STATUS_SYNC_CHANGED |
1953 MAC_STATUS_CFG_CHANGED |
1954 MAC_STATUS_MI_COMPLETION |
1955 MAC_STATUS_LNKSTATE_CHANGED));
1956 udelay(40);
1957
1958 tp->mi_mode = MAC_MI_MODE_BASE;
1959 tw32_f(MAC_MI_MODE, tp->mi_mode);
1960 udelay(80);
1961
1962 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1963
1964 /* Some third-party PHYs need to be reset on link going
1965 * down.
1966 */
1967 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1970 netif_carrier_ok(tp->dev)) {
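		/* BMSR latches link failures until read, so read it
		 * twice; the second read reflects the current state.
		 */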
1971 tg3_readphy(tp, MII_BMSR, &bmsr);
1972 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1973 !(bmsr & BMSR_LSTATUS))
1974 force_reset = 1;
1975 }
1976 if (force_reset)
1977 tg3_phy_reset(tp);
1978
1979 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1980 tg3_readphy(tp, MII_BMSR, &bmsr);
1981 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1982 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1983 bmsr = 0;
1984
1985 if (!(bmsr & BMSR_LSTATUS)) {
1986 err = tg3_init_5401phy_dsp(tp);
1987 if (err)
1988 return err;
1989
1990 tg3_readphy(tp, MII_BMSR, &bmsr);
1991 for (i = 0; i < 1000; i++) {
1992 udelay(10);
1993 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1994 (bmsr & BMSR_LSTATUS)) {
1995 udelay(40);
1996 break;
1997 }
1998 }
1999
2000 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2001 !(bmsr & BMSR_LSTATUS) &&
2002 tp->link_config.active_speed == SPEED_1000) {
2003 err = tg3_phy_reset(tp);
2004 if (!err)
2005 err = tg3_init_5401phy_dsp(tp);
2006 if (err)
2007 return err;
2008 }
2009 }
2010 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2011 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2012 /* 5701 {A0,B0} CRC bug workaround */
2013 tg3_writephy(tp, 0x15, 0x0a75);
2014 tg3_writephy(tp, 0x1c, 0x8c68);
2015 tg3_writephy(tp, 0x1c, 0x8d68);
2016 tg3_writephy(tp, 0x1c, 0x8c68);
2017 }
2018
2019 /* Clear pending interrupts... */
2020 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2021 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2022
2023 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2024 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2025 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2026 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2027
2028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2030 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2031 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2032 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2033 else
2034 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2035 }
2036
2037 current_link_up = 0;
2038 current_speed = SPEED_INVALID;
2039 current_duplex = DUPLEX_INVALID;
2040
2041 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2042 u32 val;
2043
2044 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2045 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2046 if (!(val & (1 << 10))) {
2047 val |= (1 << 10);
2048 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2049 goto relink;
2050 }
2051 }
2052
2053 bmsr = 0;
2054 for (i = 0; i < 100; i++) {
2055 tg3_readphy(tp, MII_BMSR, &bmsr);
2056 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2057 (bmsr & BMSR_LSTATUS))
2058 break;
2059 udelay(40);
2060 }
2061
2062 if (bmsr & BMSR_LSTATUS) {
2063 u32 aux_stat, bmcr;
2064
2065 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2066 for (i = 0; i < 2000; i++) {
2067 udelay(10);
2068 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2069 aux_stat)
2070 break;
2071 }
2072
2073 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
2074 &current_speed,
2075 &current_duplex);
2076
2077 bmcr = 0;
2078 for (i = 0; i < 200; i++) {
2079 tg3_readphy(tp, MII_BMCR, &bmcr);
2080 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2081 continue;
2082 if (bmcr && bmcr != 0x7fff)
2083 break;
2084 udelay(10);
2085 }
2086
2087 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2088 if (bmcr & BMCR_ANENABLE) {
2089 current_link_up = 1;
2090
2091 /* Force autoneg restart if we are exiting
2092 * low power mode.
2093 */
2094 if (!tg3_copper_is_advertising_all(tp,
2095 tp->link_config.advertising))
2096 current_link_up = 0;
2097 } else {
2098 current_link_up = 0;
2099 }
2100 } else {
2101 if (!(bmcr & BMCR_ANENABLE) &&
2102 tp->link_config.speed == current_speed &&
2103 tp->link_config.duplex == current_duplex) {
2104 current_link_up = 1;
2105 } else {
2106 current_link_up = 0;
2107 }
2108 }
2109
2110 tp->link_config.active_speed = current_speed;
2111 tp->link_config.active_duplex = current_duplex;
2112 }
2113
2114 if (current_link_up == 1 &&
2115 (tp->link_config.active_duplex == DUPLEX_FULL) &&
2116 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
2117 u32 local_adv, remote_adv;
2118
2119 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
2120 local_adv = 0;
2121 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2122
2123 if (tg3_readphy(tp, MII_LPA, &remote_adv))
2124 remote_adv = 0;
2125
2126 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
2127
2128 /* If we are not advertising full pause capability,
2129 * something is wrong. Bring the link down and reconfigure.
2130 */
2131 if (local_adv != ADVERTISE_PAUSE_CAP) {
2132 current_link_up = 0;
2133 } else {
2134 tg3_setup_flow_control(tp, local_adv, remote_adv);
2135 }
2136 }
2137 relink:
2138 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2139 u32 tmp;
2140
2141 tg3_phy_copper_begin(tp);
2142
2143 tg3_readphy(tp, MII_BMSR, &tmp);
2144 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2145 (tmp & BMSR_LSTATUS))
2146 current_link_up = 1;
2147 }
2148
2149 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2150 if (current_link_up == 1) {
2151 if (tp->link_config.active_speed == SPEED_100 ||
2152 tp->link_config.active_speed == SPEED_10)
2153 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2154 else
2155 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2156 } else
2157 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2158
2159 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2160 if (tp->link_config.active_duplex == DUPLEX_HALF)
2161 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2162
2163 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2164 if (current_link_up == 1 &&
2165 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2166 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2167 else
2168 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2169 }
2170
2171 /* ??? Without this setting Netgear GA302T PHY does not
2172 * ??? send/receive packets...
2173 */
2174 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2175 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2176 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2177 tw32_f(MAC_MI_MODE, tp->mi_mode);
2178 udelay(80);
2179 }
2180
2181 tw32_f(MAC_MODE, tp->mac_mode);
2182 udelay(40);
2183
2184 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2185 /* Polled via timer. */
2186 tw32_f(MAC_EVENT, 0);
2187 } else {
2188 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2189 }
2190 udelay(40);
2191
2192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2193 current_link_up == 1 &&
2194 tp->link_config.active_speed == SPEED_1000 &&
2195 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2196 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2197 udelay(120);
2198 tw32_f(MAC_STATUS,
2199 (MAC_STATUS_SYNC_CHANGED |
2200 MAC_STATUS_CFG_CHANGED));
2201 udelay(40);
2202 tg3_write_mem(tp,
2203 NIC_SRAM_FIRMWARE_MBOX,
2204 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
2205 }
2206
2207 if (current_link_up != netif_carrier_ok(tp->dev)) {
2208 if (current_link_up)
2209 netif_carrier_on(tp->dev);
2210 else
2211 netif_carrier_off(tp->dev);
2212 tg3_link_report(tp);
2213 }
2214
2215 return 0;
2216 }
2217
2218 struct tg3_fiber_aneginfo {
2219 int state;
2220 #define ANEG_STATE_UNKNOWN 0
2221 #define ANEG_STATE_AN_ENABLE 1
2222 #define ANEG_STATE_RESTART_INIT 2
2223 #define ANEG_STATE_RESTART 3
2224 #define ANEG_STATE_DISABLE_LINK_OK 4
2225 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2226 #define ANEG_STATE_ABILITY_DETECT 6
2227 #define ANEG_STATE_ACK_DETECT_INIT 7
2228 #define ANEG_STATE_ACK_DETECT 8
2229 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2230 #define ANEG_STATE_COMPLETE_ACK 10
2231 #define ANEG_STATE_IDLE_DETECT_INIT 11
2232 #define ANEG_STATE_IDLE_DETECT 12
2233 #define ANEG_STATE_LINK_OK 13
2234 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2235 #define ANEG_STATE_NEXT_PAGE_WAIT 15
2236
2237 u32 flags;
2238 #define MR_AN_ENABLE 0x00000001
2239 #define MR_RESTART_AN 0x00000002
2240 #define MR_AN_COMPLETE 0x00000004
2241 #define MR_PAGE_RX 0x00000008
2242 #define MR_NP_LOADED 0x00000010
2243 #define MR_TOGGLE_TX 0x00000020
2244 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2245 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2246 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2247 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2248 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2249 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2250 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2251 #define MR_TOGGLE_RX 0x00002000
2252 #define MR_NP_RX 0x00004000
2253
2254 #define MR_LINK_OK 0x80000000
2255
2256 unsigned long link_time, cur_time;
2257
2258 u32 ability_match_cfg;
2259 int ability_match_count;
2260
2261 char ability_match, idle_match, ack_match;
2262
2263 u32 txconfig, rxconfig;
2264 #define ANEG_CFG_NP 0x00000080
2265 #define ANEG_CFG_ACK 0x00000040
2266 #define ANEG_CFG_RF2 0x00000020
2267 #define ANEG_CFG_RF1 0x00000010
2268 #define ANEG_CFG_PS2 0x00000001
2269 #define ANEG_CFG_PS1 0x00008000
2270 #define ANEG_CFG_HD 0x00004000
2271 #define ANEG_CFG_FD 0x00002000
2272 #define ANEG_CFG_INVAL 0x00001f06
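/* These look like the IEEE 802.3z clause 37 base-page bits with the two
 * bytes of the 16-bit config word swapped, e.g. full-duplex (bit 5,
 * 0x0020) appears here as 0x2000.  (Interpretation, not verified against
 * hardware documentation.)
 */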
2273
2274 };
2275 #define ANEG_OK 0
2276 #define ANEG_DONE 1
2277 #define ANEG_TIMER_ENAB 2
2278 #define ANEG_FAILED -1
2279
2280 #define ANEG_STATE_SETTLE_TIME 10000
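/* ap->cur_time advances once per state machine invocation; fiber_autoneg()
 * below steps it roughly every microsecond, so 10000 ticks approximates the
 * 10 ms clause 37 link timer.
 */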
2281
2282 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
2283 struct tg3_fiber_aneginfo *ap)
2284 {
2285 unsigned long delta;
2286 u32 rx_cfg_reg;
2287 int ret;
2288
2289 if (ap->state == ANEG_STATE_UNKNOWN) {
2290 ap->rxconfig = 0;
2291 ap->link_time = 0;
2292 ap->cur_time = 0;
2293 ap->ability_match_cfg = 0;
2294 ap->ability_match_count = 0;
2295 ap->ability_match = 0;
2296 ap->idle_match = 0;
2297 ap->ack_match = 0;
2298 }
2299 ap->cur_time++;
2300
2301 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
2302 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
2303
2304 if (rx_cfg_reg != ap->ability_match_cfg) {
2305 ap->ability_match_cfg = rx_cfg_reg;
2306 ap->ability_match = 0;
2307 ap->ability_match_count = 0;
2308 } else {
2309 if (++ap->ability_match_count > 1) {
2310 ap->ability_match = 1;
2311 ap->ability_match_cfg = rx_cfg_reg;
2312 }
2313 }
2314 if (rx_cfg_reg & ANEG_CFG_ACK)
2315 ap->ack_match = 1;
2316 else
2317 ap->ack_match = 0;
2318
2319 ap->idle_match = 0;
2320 } else {
2321 ap->idle_match = 1;
2322 ap->ability_match_cfg = 0;
2323 ap->ability_match_count = 0;
2324 ap->ability_match = 0;
2325 ap->ack_match = 0;
2326
2327 rx_cfg_reg = 0;
2328 }
2329
2330 ap->rxconfig = rx_cfg_reg;
2331 ret = ANEG_OK;
2332
2333 switch(ap->state) {
2334 case ANEG_STATE_UNKNOWN:
2335 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
2336 ap->state = ANEG_STATE_AN_ENABLE;
2337
2338 /* fallthru */
2339 case ANEG_STATE_AN_ENABLE:
2340 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
2341 if (ap->flags & MR_AN_ENABLE) {
2342 ap->link_time = 0;
2343 ap->cur_time = 0;
2344 ap->ability_match_cfg = 0;
2345 ap->ability_match_count = 0;
2346 ap->ability_match = 0;
2347 ap->idle_match = 0;
2348 ap->ack_match = 0;
2349
2350 ap->state = ANEG_STATE_RESTART_INIT;
2351 } else {
2352 ap->state = ANEG_STATE_DISABLE_LINK_OK;
2353 }
2354 break;
2355
2356 case ANEG_STATE_RESTART_INIT:
2357 ap->link_time = ap->cur_time;
2358 ap->flags &= ~(MR_NP_LOADED);
2359 ap->txconfig = 0;
2360 tw32(MAC_TX_AUTO_NEG, 0);
2361 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2362 tw32_f(MAC_MODE, tp->mac_mode);
2363 udelay(40);
2364
2365 ret = ANEG_TIMER_ENAB;
2366 ap->state = ANEG_STATE_RESTART;
2367
2368 /* fallthru */
2369 case ANEG_STATE_RESTART:
2370 delta = ap->cur_time - ap->link_time;
2371 if (delta > ANEG_STATE_SETTLE_TIME) {
2372 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
2373 } else {
2374 ret = ANEG_TIMER_ENAB;
2375 }
2376 break;
2377
2378 case ANEG_STATE_DISABLE_LINK_OK:
2379 ret = ANEG_DONE;
2380 break;
2381
2382 case ANEG_STATE_ABILITY_DETECT_INIT:
2383 ap->flags &= ~(MR_TOGGLE_TX);
2384 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
2385 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2386 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2387 tw32_f(MAC_MODE, tp->mac_mode);
2388 udelay(40);
2389
2390 ap->state = ANEG_STATE_ABILITY_DETECT;
2391 break;
2392
2393 case ANEG_STATE_ABILITY_DETECT:
2394 if (ap->ability_match != 0 && ap->rxconfig != 0) {
2395 ap->state = ANEG_STATE_ACK_DETECT_INIT;
2396 }
2397 break;
2398
2399 case ANEG_STATE_ACK_DETECT_INIT:
2400 ap->txconfig |= ANEG_CFG_ACK;
2401 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
2402 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
2403 tw32_f(MAC_MODE, tp->mac_mode);
2404 udelay(40);
2405
2406 ap->state = ANEG_STATE_ACK_DETECT;
2407
2408 /* fallthru */
2409 case ANEG_STATE_ACK_DETECT:
2410 if (ap->ack_match != 0) {
2411 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
2412 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
2413 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
2414 } else {
2415 ap->state = ANEG_STATE_AN_ENABLE;
2416 }
2417 } else if (ap->ability_match != 0 &&
2418 ap->rxconfig == 0) {
2419 ap->state = ANEG_STATE_AN_ENABLE;
2420 }
2421 break;
2422
2423 case ANEG_STATE_COMPLETE_ACK_INIT:
2424 if (ap->rxconfig & ANEG_CFG_INVAL) {
2425 ret = ANEG_FAILED;
2426 break;
2427 }
2428 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
2429 MR_LP_ADV_HALF_DUPLEX |
2430 MR_LP_ADV_SYM_PAUSE |
2431 MR_LP_ADV_ASYM_PAUSE |
2432 MR_LP_ADV_REMOTE_FAULT1 |
2433 MR_LP_ADV_REMOTE_FAULT2 |
2434 MR_LP_ADV_NEXT_PAGE |
2435 MR_TOGGLE_RX |
2436 MR_NP_RX);
2437 if (ap->rxconfig & ANEG_CFG_FD)
2438 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
2439 if (ap->rxconfig & ANEG_CFG_HD)
2440 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
2441 if (ap->rxconfig & ANEG_CFG_PS1)
2442 ap->flags |= MR_LP_ADV_SYM_PAUSE;
2443 if (ap->rxconfig & ANEG_CFG_PS2)
2444 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
2445 if (ap->rxconfig & ANEG_CFG_RF1)
2446 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
2447 if (ap->rxconfig & ANEG_CFG_RF2)
2448 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
2449 if (ap->rxconfig & ANEG_CFG_NP)
2450 ap->flags |= MR_LP_ADV_NEXT_PAGE;
2451
2452 ap->link_time = ap->cur_time;
2453
2454 ap->flags ^= (MR_TOGGLE_TX);
2455 if (ap->rxconfig & 0x0008)
2456 ap->flags |= MR_TOGGLE_RX;
2457 if (ap->rxconfig & ANEG_CFG_NP)
2458 ap->flags |= MR_NP_RX;
2459 ap->flags |= MR_PAGE_RX;
2460
2461 ap->state = ANEG_STATE_COMPLETE_ACK;
2462 ret = ANEG_TIMER_ENAB;
2463 break;
2464
2465 case ANEG_STATE_COMPLETE_ACK:
2466 if (ap->ability_match != 0 &&
2467 ap->rxconfig == 0) {
2468 ap->state = ANEG_STATE_AN_ENABLE;
2469 break;
2470 }
2471 delta = ap->cur_time - ap->link_time;
2472 if (delta > ANEG_STATE_SETTLE_TIME) {
2473 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2474 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2475 } else {
2476 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2477 !(ap->flags & MR_NP_RX)) {
2478 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2479 } else {
2480 ret = ANEG_FAILED;
2481 }
2482 }
2483 }
2484 break;
2485
2486 case ANEG_STATE_IDLE_DETECT_INIT:
2487 ap->link_time = ap->cur_time;
2488 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2489 tw32_f(MAC_MODE, tp->mac_mode);
2490 udelay(40);
2491
2492 ap->state = ANEG_STATE_IDLE_DETECT;
2493 ret = ANEG_TIMER_ENAB;
2494 break;
2495
2496 case ANEG_STATE_IDLE_DETECT:
2497 if (ap->ability_match != 0 &&
2498 ap->rxconfig == 0) {
2499 ap->state = ANEG_STATE_AN_ENABLE;
2500 break;
2501 }
2502 delta = ap->cur_time - ap->link_time;
2503 if (delta > ANEG_STATE_SETTLE_TIME) {
2504 /* XXX another gem from the Broadcom driver :( */
2505 ap->state = ANEG_STATE_LINK_OK;
2506 }
2507 break;
2508
2509 case ANEG_STATE_LINK_OK:
2510 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2511 ret = ANEG_DONE;
2512 break;
2513
2514 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2515 /* ??? unimplemented */
2516 break;
2517
2518 case ANEG_STATE_NEXT_PAGE_WAIT:
2519 /* ??? unimplemented */
2520 break;
2521
2522 default:
2523 ret = ANEG_FAILED;
2524 break;
2525 	}
2526
2527 return ret;
2528 }
2529
2530 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2531 {
2532 int res = 0;
2533 struct tg3_fiber_aneginfo aninfo;
2534 int status = ANEG_FAILED;
2535 unsigned int tick;
2536 u32 tmp;
2537
2538 tw32_f(MAC_TX_AUTO_NEG, 0);
2539
2540 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2541 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2542 udelay(40);
2543
2544 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2545 udelay(40);
2546
2547 memset(&aninfo, 0, sizeof(aninfo));
2548 aninfo.flags |= MR_AN_ENABLE;
2549 aninfo.state = ANEG_STATE_UNKNOWN;
2550 aninfo.cur_time = 0;
2551 tick = 0;
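	/* Each iteration costs at least the udelay(1) below, so this
	 * bounds autoneg at roughly 195 ms before giving up.
	 */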
2552 while (++tick < 195000) {
2553 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2554 if (status == ANEG_DONE || status == ANEG_FAILED)
2555 break;
2556
2557 udelay(1);
2558 }
2559
2560 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2561 tw32_f(MAC_MODE, tp->mac_mode);
2562 udelay(40);
2563
2564 *flags = aninfo.flags;
2565
2566 if (status == ANEG_DONE &&
2567 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2568 MR_LP_ADV_FULL_DUPLEX)))
2569 res = 1;
2570
2571 return res;
2572 }
2573
2574 static void tg3_init_bcm8002(struct tg3 *tp)
2575 {
2576 u32 mac_status = tr32(MAC_STATUS);
2577 int i;
2578
2579 	/* Reset only on first-time init or when we have a link. */
2580 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2581 !(mac_status & MAC_STATUS_PCS_SYNCED))
2582 return;
2583
2584 /* Set PLL lock range. */
2585 tg3_writephy(tp, 0x16, 0x8007);
2586
2587 /* SW reset */
2588 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2589
2590 /* Wait for reset to complete. */
2591 /* XXX schedule_timeout() ... */
2592 for (i = 0; i < 500; i++)
2593 udelay(10);
2594
2595 /* Config mode; select PMA/Ch 1 regs. */
2596 tg3_writephy(tp, 0x10, 0x8411);
2597
2598 /* Enable auto-lock and comdet, select txclk for tx. */
2599 tg3_writephy(tp, 0x11, 0x0a10);
2600
2601 tg3_writephy(tp, 0x18, 0x00a0);
2602 tg3_writephy(tp, 0x16, 0x41ff);
2603
2604 /* Assert and deassert POR. */
2605 tg3_writephy(tp, 0x13, 0x0400);
2606 udelay(40);
2607 tg3_writephy(tp, 0x13, 0x0000);
2608
2609 tg3_writephy(tp, 0x11, 0x0a50);
2610 udelay(40);
2611 tg3_writephy(tp, 0x11, 0x0a10);
2612
2613 /* Wait for signal to stabilize */
2614 /* XXX schedule_timeout() ... */
2615 for (i = 0; i < 15000; i++)
2616 udelay(10);
2617
2618 /* Deselect the channel register so we can read the PHYID
2619 * later.
2620 */
2621 tg3_writephy(tp, 0x10, 0x8011);
2622 }
2623
2624 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2625 {
2626 u32 sg_dig_ctrl, sg_dig_status;
2627 u32 serdes_cfg, expected_sg_dig_ctrl;
2628 int workaround, port_a;
2629 int current_link_up;
2630
2631 serdes_cfg = 0;
2632 expected_sg_dig_ctrl = 0;
2633 workaround = 0;
2634 port_a = 1;
2635 current_link_up = 0;
2636
2637 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2638 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2639 workaround = 1;
2640 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2641 port_a = 0;
2642
2643 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2644 /* preserve bits 20-23 for voltage regulator */
2645 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2646 }
2647
2648 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2649
2650 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2651 if (sg_dig_ctrl & (1 << 31)) {
2652 if (workaround) {
2653 u32 val = serdes_cfg;
2654
2655 if (port_a)
2656 val |= 0xc010000;
2657 else
2658 val |= 0x4010000;
2659 tw32_f(MAC_SERDES_CFG, val);
2660 }
2661 tw32_f(SG_DIG_CTRL, 0x01388400);
2662 }
2663 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2664 tg3_setup_flow_control(tp, 0, 0);
2665 current_link_up = 1;
2666 }
2667 goto out;
2668 }
2669
2670 /* Want auto-negotiation. */
2671 expected_sg_dig_ctrl = 0x81388400;
2672
2673 /* Pause capability */
2674 expected_sg_dig_ctrl |= (1 << 11);
2675
2676 	/* Asymmetric pause */
2677 expected_sg_dig_ctrl |= (1 << 12);
2678
2679 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2680 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
2681 tp->serdes_counter &&
2682 ((mac_status & (MAC_STATUS_PCS_SYNCED |
2683 MAC_STATUS_RCVD_CFG)) ==
2684 MAC_STATUS_PCS_SYNCED)) {
2685 tp->serdes_counter--;
2686 current_link_up = 1;
2687 goto out;
2688 }
2689 restart_autoneg:
2690 if (workaround)
2691 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2692 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2693 udelay(5);
2694 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2695
2696 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2697 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2698 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2699 MAC_STATUS_SIGNAL_DET)) {
2700 sg_dig_status = tr32(SG_DIG_STATUS);
2701 mac_status = tr32(MAC_STATUS);
2702
2703 if ((sg_dig_status & (1 << 1)) &&
2704 (mac_status & MAC_STATUS_PCS_SYNCED)) {
2705 u32 local_adv, remote_adv;
2706
2707 local_adv = ADVERTISE_PAUSE_CAP;
2708 remote_adv = 0;
2709 if (sg_dig_status & (1 << 19))
2710 remote_adv |= LPA_PAUSE_CAP;
2711 if (sg_dig_status & (1 << 20))
2712 remote_adv |= LPA_PAUSE_ASYM;
2713
2714 tg3_setup_flow_control(tp, local_adv, remote_adv);
2715 current_link_up = 1;
2716 tp->serdes_counter = 0;
2717 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2718 } else if (!(sg_dig_status & (1 << 1))) {
2719 if (tp->serdes_counter)
2720 tp->serdes_counter--;
2721 else {
2722 if (workaround) {
2723 u32 val = serdes_cfg;
2724
2725 if (port_a)
2726 val |= 0xc010000;
2727 else
2728 val |= 0x4010000;
2729
2730 tw32_f(MAC_SERDES_CFG, val);
2731 }
2732
2733 tw32_f(SG_DIG_CTRL, 0x01388400);
2734 udelay(40);
2735
2736 				/* Link parallel detection: link is up only if
2737 				 * we have PCS_SYNC and are not receiving
2738 				 * config code words. */
2739 mac_status = tr32(MAC_STATUS);
2740 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2741 !(mac_status & MAC_STATUS_RCVD_CFG)) {
2742 tg3_setup_flow_control(tp, 0, 0);
2743 current_link_up = 1;
2744 tp->tg3_flags2 |=
2745 TG3_FLG2_PARALLEL_DETECT;
2746 tp->serdes_counter =
2747 SERDES_PARALLEL_DET_TIMEOUT;
2748 } else
2749 goto restart_autoneg;
2750 }
2751 }
2752 } else {
2753 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
2754 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2755 }
2756
2757 out:
2758 return current_link_up;
2759 }
2760
2761 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2762 {
2763 int current_link_up = 0;
2764
2765 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
2766 goto out;
2767
2768 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2769 u32 flags;
2770 int i;
2771
2772 if (fiber_autoneg(tp, &flags)) {
2773 u32 local_adv, remote_adv;
2774
2775 local_adv = ADVERTISE_PAUSE_CAP;
2776 remote_adv = 0;
2777 if (flags & MR_LP_ADV_SYM_PAUSE)
2778 remote_adv |= LPA_PAUSE_CAP;
2779 if (flags & MR_LP_ADV_ASYM_PAUSE)
2780 remote_adv |= LPA_PAUSE_ASYM;
2781
2782 tg3_setup_flow_control(tp, local_adv, remote_adv);
2783
2784 current_link_up = 1;
2785 }
2786 for (i = 0; i < 30; i++) {
2787 udelay(20);
2788 tw32_f(MAC_STATUS,
2789 (MAC_STATUS_SYNC_CHANGED |
2790 MAC_STATUS_CFG_CHANGED));
2791 udelay(40);
2792 if ((tr32(MAC_STATUS) &
2793 (MAC_STATUS_SYNC_CHANGED |
2794 MAC_STATUS_CFG_CHANGED)) == 0)
2795 break;
2796 }
2797
2798 mac_status = tr32(MAC_STATUS);
2799 if (current_link_up == 0 &&
2800 (mac_status & MAC_STATUS_PCS_SYNCED) &&
2801 !(mac_status & MAC_STATUS_RCVD_CFG))
2802 current_link_up = 1;
2803 } else {
2804 /* Forcing 1000FD link up. */
2805 current_link_up = 1;
2806
2807 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2808 udelay(40);
2809
2810 tw32_f(MAC_MODE, tp->mac_mode);
2811 udelay(40);
2812 }
2813
2814 out:
2815 return current_link_up;
2816 }
2817
2818 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2819 {
2820 u32 orig_pause_cfg;
2821 u16 orig_active_speed;
2822 u8 orig_active_duplex;
2823 u32 mac_status;
2824 int current_link_up;
2825 int i;
2826
2827 orig_pause_cfg = tp->link_config.active_flowctrl;
2828 orig_active_speed = tp->link_config.active_speed;
2829 orig_active_duplex = tp->link_config.active_duplex;
2830
2831 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2832 netif_carrier_ok(tp->dev) &&
2833 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2834 mac_status = tr32(MAC_STATUS);
2835 mac_status &= (MAC_STATUS_PCS_SYNCED |
2836 MAC_STATUS_SIGNAL_DET |
2837 MAC_STATUS_CFG_CHANGED |
2838 MAC_STATUS_RCVD_CFG);
2839 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2840 MAC_STATUS_SIGNAL_DET)) {
2841 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2842 MAC_STATUS_CFG_CHANGED));
2843 return 0;
2844 }
2845 }
2846
2847 tw32_f(MAC_TX_AUTO_NEG, 0);
2848
2849 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2850 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2851 tw32_f(MAC_MODE, tp->mac_mode);
2852 udelay(40);
2853
2854 if (tp->phy_id == PHY_ID_BCM8002)
2855 tg3_init_bcm8002(tp);
2856
2857 /* Enable link change event even when serdes polling. */
2858 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2859 udelay(40);
2860
2861 current_link_up = 0;
2862 mac_status = tr32(MAC_STATUS);
2863
2864 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2865 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2866 else
2867 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2868
2869 tp->hw_status->status =
2870 (SD_STATUS_UPDATED |
2871 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2872
2873 for (i = 0; i < 100; i++) {
2874 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2875 MAC_STATUS_CFG_CHANGED));
2876 udelay(5);
2877 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2878 MAC_STATUS_CFG_CHANGED |
2879 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
2880 break;
2881 }
2882
2883 mac_status = tr32(MAC_STATUS);
2884 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2885 current_link_up = 0;
2886 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2887 tp->serdes_counter == 0) {
2888 tw32_f(MAC_MODE, (tp->mac_mode |
2889 MAC_MODE_SEND_CONFIGS));
2890 udelay(1);
2891 tw32_f(MAC_MODE, tp->mac_mode);
2892 }
2893 }
2894
2895 if (current_link_up == 1) {
2896 tp->link_config.active_speed = SPEED_1000;
2897 tp->link_config.active_duplex = DUPLEX_FULL;
2898 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2899 LED_CTRL_LNKLED_OVERRIDE |
2900 LED_CTRL_1000MBPS_ON));
2901 } else {
2902 tp->link_config.active_speed = SPEED_INVALID;
2903 tp->link_config.active_duplex = DUPLEX_INVALID;
2904 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2905 LED_CTRL_LNKLED_OVERRIDE |
2906 LED_CTRL_TRAFFIC_OVERRIDE));
2907 }
2908
2909 if (current_link_up != netif_carrier_ok(tp->dev)) {
2910 if (current_link_up)
2911 netif_carrier_on(tp->dev);
2912 else
2913 netif_carrier_off(tp->dev);
2914 tg3_link_report(tp);
2915 } else {
2916 u32 now_pause_cfg = tp->link_config.active_flowctrl;
2917 if (orig_pause_cfg != now_pause_cfg ||
2918 orig_active_speed != tp->link_config.active_speed ||
2919 orig_active_duplex != tp->link_config.active_duplex)
2920 tg3_link_report(tp);
2921 }
2922
2923 return 0;
2924 }
2925
2926 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
2927 {
2928 int current_link_up, err = 0;
2929 u32 bmsr, bmcr;
2930 u16 current_speed;
2931 u8 current_duplex;
2932
2933 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2934 tw32_f(MAC_MODE, tp->mac_mode);
2935 udelay(40);
2936
2937 tw32(MAC_EVENT, 0);
2938
2939 tw32_f(MAC_STATUS,
2940 (MAC_STATUS_SYNC_CHANGED |
2941 MAC_STATUS_CFG_CHANGED |
2942 MAC_STATUS_MI_COMPLETION |
2943 MAC_STATUS_LNKSTATE_CHANGED));
2944 udelay(40);
2945
2946 if (force_reset)
2947 tg3_phy_reset(tp);
2948
2949 current_link_up = 0;
2950 current_speed = SPEED_INVALID;
2951 current_duplex = DUPLEX_INVALID;
2952
2953 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2954 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
2955 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2956 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
2957 bmsr |= BMSR_LSTATUS;
2958 else
2959 bmsr &= ~BMSR_LSTATUS;
2960 }
2961
2962 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
2963
2964 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
2965 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
2966 /* do nothing, just check for link up at the end */
2967 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2968 u32 adv, new_adv;
2969
2970 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
2971 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
2972 ADVERTISE_1000XPAUSE |
2973 ADVERTISE_1000XPSE_ASYM |
2974 ADVERTISE_SLCT);
2975
2976 /* Always advertise symmetric PAUSE just like copper */
2977 new_adv |= ADVERTISE_1000XPAUSE;
2978
2979 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2980 new_adv |= ADVERTISE_1000XHALF;
2981 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2982 new_adv |= ADVERTISE_1000XFULL;
2983
2984 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
2985 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2986 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
2987 tg3_writephy(tp, MII_BMCR, bmcr);
2988
2989 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2990 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
2991 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
2992
2993 return err;
2994 }
2995 } else {
2996 u32 new_bmcr;
2997
2998 bmcr &= ~BMCR_SPEED1000;
2999 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3000
3001 if (tp->link_config.duplex == DUPLEX_FULL)
3002 new_bmcr |= BMCR_FULLDPLX;
3003
3004 if (new_bmcr != bmcr) {
3005 /* BMCR_SPEED1000 is a reserved bit that needs
3006 * to be set on write.
3007 */
3008 new_bmcr |= BMCR_SPEED1000;
3009
3010 /* Force a linkdown */
3011 if (netif_carrier_ok(tp->dev)) {
3012 u32 adv;
3013
3014 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3015 adv &= ~(ADVERTISE_1000XFULL |
3016 ADVERTISE_1000XHALF |
3017 ADVERTISE_SLCT);
3018 tg3_writephy(tp, MII_ADVERTISE, adv);
3019 tg3_writephy(tp, MII_BMCR, bmcr |
3020 BMCR_ANRESTART |
3021 BMCR_ANENABLE);
3022 udelay(10);
3023 netif_carrier_off(tp->dev);
3024 }
3025 tg3_writephy(tp, MII_BMCR, new_bmcr);
3026 bmcr = new_bmcr;
3027 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3028 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3029 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3030 ASIC_REV_5714) {
3031 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3032 bmsr |= BMSR_LSTATUS;
3033 else
3034 bmsr &= ~BMSR_LSTATUS;
3035 }
3036 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3037 }
3038 }
3039
3040 if (bmsr & BMSR_LSTATUS) {
3041 current_speed = SPEED_1000;
3042 current_link_up = 1;
3043 if (bmcr & BMCR_FULLDPLX)
3044 current_duplex = DUPLEX_FULL;
3045 else
3046 current_duplex = DUPLEX_HALF;
3047
3048 if (bmcr & BMCR_ANENABLE) {
3049 u32 local_adv, remote_adv, common;
3050
3051 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3052 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3053 common = local_adv & remote_adv;
3054 if (common & (ADVERTISE_1000XHALF |
3055 ADVERTISE_1000XFULL)) {
3056 if (common & ADVERTISE_1000XFULL)
3057 current_duplex = DUPLEX_FULL;
3058 else
3059 current_duplex = DUPLEX_HALF;
3060
3061 tg3_setup_flow_control(tp, local_adv,
3062 remote_adv);
3063 			} else
3065 current_link_up = 0;
3066 }
3067 }
3068
3069 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3070 if (tp->link_config.active_duplex == DUPLEX_HALF)
3071 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3072
3073 tw32_f(MAC_MODE, tp->mac_mode);
3074 udelay(40);
3075
3076 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3077
3078 tp->link_config.active_speed = current_speed;
3079 tp->link_config.active_duplex = current_duplex;
3080
3081 if (current_link_up != netif_carrier_ok(tp->dev)) {
3082 if (current_link_up)
3083 netif_carrier_on(tp->dev);
3084 else {
3085 netif_carrier_off(tp->dev);
3086 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3087 }
3088 tg3_link_report(tp);
3089 }
3090 return err;
3091 }
3092
3093 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3094 {
3095 if (tp->serdes_counter) {
3096 /* Give autoneg time to complete. */
3097 tp->serdes_counter--;
3098 return;
3099 }
3100 if (!netif_carrier_ok(tp->dev) &&
3101 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3102 u32 bmcr;
3103
3104 tg3_readphy(tp, MII_BMCR, &bmcr);
3105 if (bmcr & BMCR_ANENABLE) {
3106 u32 phy1, phy2;
3107
3108 /* Select shadow register 0x1f */
3109 tg3_writephy(tp, 0x1c, 0x7c00);
3110 tg3_readphy(tp, 0x1c, &phy1);
3111
3112 /* Select expansion interrupt status register */
3113 tg3_writephy(tp, 0x17, 0x0f01);
3114 tg3_readphy(tp, 0x15, &phy2);
3115 tg3_readphy(tp, 0x15, &phy2);
3116
3117 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3118 /* We have signal detect and not receiving
3119 * config code words, link is up by parallel
3120 * detection.
3121 */
3122
3123 bmcr &= ~BMCR_ANENABLE;
3124 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3125 tg3_writephy(tp, MII_BMCR, bmcr);
3126 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3127 }
3128 }
3129 	} else if (netif_carrier_ok(tp->dev) &&
3131 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3132 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3133 u32 phy2;
3134
3135 /* Select expansion interrupt status register */
3136 tg3_writephy(tp, 0x17, 0x0f01);
3137 tg3_readphy(tp, 0x15, &phy2);
3138 if (phy2 & 0x20) {
3139 u32 bmcr;
3140
3141 /* Config code words received, turn on autoneg. */
3142 tg3_readphy(tp, MII_BMCR, &bmcr);
3143 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3144
3145 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3147 		}
3148 }
3149 }
3150
3151 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3152 {
3153 int err;
3154
3155 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3156 err = tg3_setup_fiber_phy(tp, force_reset);
3157 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3158 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3159 } else {
3160 err = tg3_setup_copper_phy(tp, force_reset);
3161 }
3162
3163 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
3164 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
3165 u32 val, scale;
3166
3167 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3168 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3169 scale = 65;
3170 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3171 scale = 6;
3172 else
3173 scale = 12;
3174
3175 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3176 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3177 tw32(GRC_MISC_CFG, val);
3178 }
3179
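	/* Half-duplex gigabit requires a much larger slot time for
	 * carrier extension; presumably that is what the 0xff value
	 * programs, versus the standard 32 for all other modes.
	 */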
3180 if (tp->link_config.active_speed == SPEED_1000 &&
3181 tp->link_config.active_duplex == DUPLEX_HALF)
3182 tw32(MAC_TX_LENGTHS,
3183 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3184 (6 << TX_LENGTHS_IPG_SHIFT) |
3185 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3186 else
3187 tw32(MAC_TX_LENGTHS,
3188 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3189 (6 << TX_LENGTHS_IPG_SHIFT) |
3190 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3191
3192 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3193 if (netif_carrier_ok(tp->dev)) {
3194 tw32(HOSTCC_STAT_COAL_TICKS,
3195 tp->coal.stats_block_coalesce_usecs);
3196 } else {
3197 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3198 }
3199 }
3200
3201 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3202 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3203 if (!netif_carrier_ok(tp->dev))
3204 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3205 tp->pwrmgmt_thresh;
3206 else
3207 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3208 tw32(PCIE_PWR_MGMT_THRESH, val);
3209 }
3210
3211 return err;
3212 }
3213
3214 /* This is called whenever we suspect that the system chipset is re-
3215 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3216 * is bogus tx completions. We try to recover by setting the
3217 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3218 * in the workqueue.
3219 */
3220 static void tg3_tx_recover(struct tg3 *tp)
3221 {
3222 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3223 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3224
3225 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3226 "mapped I/O cycles to the network device, attempting to "
3227 "recover. Please report the problem to the driver maintainer "
3228 "and include system chipset information.\n", tp->dev->name);
3229
3230 spin_lock(&tp->lock);
3231 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3232 spin_unlock(&tp->lock);
3233 }
3234
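/* tx_prod and tx_cons are free-running; masking their difference with
 * (TG3_TX_RING_SIZE - 1) works because the ring size is a power of two.
 * For example (hypothetical values): with tx_pending == 511, tx_prod == 5
 * and tx_cons == 510, (5 - 510) & 511 == 7 descriptors are in flight,
 * leaving 504 available.
 */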
3235 static inline u32 tg3_tx_avail(struct tg3 *tp)
3236 {
3237 smp_mb();
3238 return (tp->tx_pending -
3239 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3240 }
3241
3242 /* Tigon3 never reports partial packet sends. So we do not
3243 * need special logic to handle SKBs that have not had all
3244 * of their frags sent yet, like SunGEM does.
3245 */
3246 static void tg3_tx(struct tg3 *tp)
3247 {
3248 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3249 u32 sw_idx = tp->tx_cons;
3250
3251 while (sw_idx != hw_idx) {
3252 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3253 struct sk_buff *skb = ri->skb;
3254 int i, tx_bug = 0;
3255
3256 if (unlikely(skb == NULL)) {
3257 tg3_tx_recover(tp);
3258 return;
3259 }
3260
3261 pci_unmap_single(tp->pdev,
3262 pci_unmap_addr(ri, mapping),
3263 skb_headlen(skb),
3264 PCI_DMA_TODEVICE);
3265
3266 ri->skb = NULL;
3267
3268 sw_idx = NEXT_TX(sw_idx);
3269
3270 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3271 ri = &tp->tx_buffers[sw_idx];
3272 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
3273 tx_bug = 1;
3274
3275 pci_unmap_page(tp->pdev,
3276 pci_unmap_addr(ri, mapping),
3277 skb_shinfo(skb)->frags[i].size,
3278 PCI_DMA_TODEVICE);
3279
3280 sw_idx = NEXT_TX(sw_idx);
3281 }
3282
3283 dev_kfree_skb(skb);
3284
3285 if (unlikely(tx_bug)) {
3286 tg3_tx_recover(tp);
3287 return;
3288 }
3289 }
3290
3291 tp->tx_cons = sw_idx;
3292
3293 /* Need to make the tx_cons update visible to tg3_start_xmit()
3294 * before checking for netif_queue_stopped(). Without the
3295 * memory barrier, there is a small possibility that tg3_start_xmit()
3296 * will miss it and cause the queue to be stopped forever.
3297 */
3298 smp_mb();
3299
3300 if (unlikely(netif_queue_stopped(tp->dev) &&
3301 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
3302 netif_tx_lock(tp->dev);
3303 if (netif_queue_stopped(tp->dev) &&
3304 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
3305 netif_wake_queue(tp->dev);
3306 netif_tx_unlock(tp->dev);
3307 }
3308 }
3309
3310 /* Returns size of skb allocated or < 0 on error.
3311 *
3312 * We only need to fill in the address because the other members
3313 * of the RX descriptor are invariant, see tg3_init_rings.
3314 *
3315 * Note the purposeful asymmetry of cpu vs. chip accesses. For
3316 * posting buffers we only dirty the first cache line of the RX
3317 * descriptor (containing the address). Whereas for the RX status
3318 * buffers the cpu only reads the last cacheline of the RX descriptor
3319 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3320 */
3321 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
3322 int src_idx, u32 dest_idx_unmasked)
3323 {
3324 struct tg3_rx_buffer_desc *desc;
3325 struct ring_info *map, *src_map;
3326 struct sk_buff *skb;
3327 dma_addr_t mapping;
3328 int skb_size, dest_idx;
3329
3330 src_map = NULL;
3331 switch (opaque_key) {
3332 case RXD_OPAQUE_RING_STD:
3333 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3334 desc = &tp->rx_std[dest_idx];
3335 map = &tp->rx_std_buffers[dest_idx];
3336 if (src_idx >= 0)
3337 src_map = &tp->rx_std_buffers[src_idx];
3338 skb_size = tp->rx_pkt_buf_sz;
3339 break;
3340
3341 case RXD_OPAQUE_RING_JUMBO:
3342 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3343 desc = &tp->rx_jumbo[dest_idx];
3344 map = &tp->rx_jumbo_buffers[dest_idx];
3345 if (src_idx >= 0)
3346 src_map = &tp->rx_jumbo_buffers[src_idx];
3347 skb_size = RX_JUMBO_PKT_BUF_SZ;
3348 break;
3349
3350 default:
3351 return -EINVAL;
3352 	}
3353
3354 /* Do not overwrite any of the map or rp information
3355 * until we are sure we can commit to a new buffer.
3356 *
3357 * Callers depend upon this behavior and assume that
3358 * we leave everything unchanged if we fail.
3359 */
3360 skb = netdev_alloc_skb(tp->dev, skb_size);
3361 if (skb == NULL)
3362 return -ENOMEM;
3363
3364 skb_reserve(skb, tp->rx_offset);
3365
3366 mapping = pci_map_single(tp->pdev, skb->data,
3367 skb_size - tp->rx_offset,
3368 PCI_DMA_FROMDEVICE);
3369
3370 map->skb = skb;
3371 pci_unmap_addr_set(map, mapping, mapping);
3372
3373 if (src_map != NULL)
3374 src_map->skb = NULL;
3375
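	/* Publish the DMA address to the NIC; the descriptor carries it
	 * as two 32-bit halves regardless of host pointer width.
	 */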
3376 desc->addr_hi = ((u64)mapping >> 32);
3377 desc->addr_lo = ((u64)mapping & 0xffffffff);
3378
3379 return skb_size;
3380 }
3381
3382 /* We only need to copy the address over because the other
3383 * members of the RX descriptor are invariant. See notes above
3384 * tg3_alloc_rx_skb for full details.
3385 */
3386 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3387 int src_idx, u32 dest_idx_unmasked)
3388 {
3389 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3390 struct ring_info *src_map, *dest_map;
3391 int dest_idx;
3392
3393 switch (opaque_key) {
3394 case RXD_OPAQUE_RING_STD:
3395 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3396 dest_desc = &tp->rx_std[dest_idx];
3397 dest_map = &tp->rx_std_buffers[dest_idx];
3398 src_desc = &tp->rx_std[src_idx];
3399 src_map = &tp->rx_std_buffers[src_idx];
3400 break;
3401
3402 case RXD_OPAQUE_RING_JUMBO:
3403 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3404 dest_desc = &tp->rx_jumbo[dest_idx];
3405 dest_map = &tp->rx_jumbo_buffers[dest_idx];
3406 src_desc = &tp->rx_jumbo[src_idx];
3407 src_map = &tp->rx_jumbo_buffers[src_idx];
3408 break;
3409
3410 default:
3411 return;
3412 	}
3413
3414 dest_map->skb = src_map->skb;
3415 pci_unmap_addr_set(dest_map, mapping,
3416 pci_unmap_addr(src_map, mapping));
3417 dest_desc->addr_hi = src_desc->addr_hi;
3418 dest_desc->addr_lo = src_desc->addr_lo;
3419
3420 src_map->skb = NULL;
3421 }
3422
3423 #if TG3_VLAN_TAG_USED
3424 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
3425 {
3426 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
3427 }
3428 #endif
3429
3430 /* The RX ring scheme is composed of multiple rings which post fresh
3431 * buffers to the chip, and one special ring the chip uses to report
3432 * status back to the host.
3433 *
3434 * The special ring reports the status of received packets to the
3435 * host. The chip does not write into the original descriptor the
3436 * RX buffer was obtained from. The chip simply takes the original
3437 * descriptor as provided by the host, updates the status and length
3438 * field, then writes this into the next status ring entry.
3439 *
3440 * Each ring the host uses to post buffers to the chip is described
3441 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
3442 * it is first placed into the on-chip RAM. Once the packet's length
3443 * is known, the chip walks down the TG3_BDINFO entries to select the
3444 * ring: each TG3_BDINFO specifies a MAXLEN field, and the first
3445 * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
3446 *
3447 * The "separate ring for rx status" scheme may sound queer, but it makes
3448 * sense from a cache coherency perspective. If only the host writes
3449 * to the buffer post rings, and only the chip writes to the rx status
3450 * rings, then cache lines never move beyond shared-modified state.
3451 * If both the host and chip were to write into the same ring, cache line
3452 * eviction could occur since both entities want it in an exclusive state.
3453 */
3454 static int tg3_rx(struct tg3 *tp, int budget)
3455 {
3456 u32 work_mask, rx_std_posted = 0;
3457 u32 sw_idx = tp->rx_rcb_ptr;
3458 u16 hw_idx;
3459 int received;
3460
3461 hw_idx = tp->hw_status->idx[0].rx_producer;
3462 /*
3463 * We need to order the read of hw_idx and the read of
3464 * the opaque cookie.
3465 */
3466 rmb();
3467 work_mask = 0;
3468 received = 0;
3469 while (sw_idx != hw_idx && budget > 0) {
3470 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
3471 unsigned int len;
3472 struct sk_buff *skb;
3473 dma_addr_t dma_addr;
3474 u32 opaque_key, desc_idx, *post_ptr;
3475
3476 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
3477 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
3478 if (opaque_key == RXD_OPAQUE_RING_STD) {
3479 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
3480 mapping);
3481 skb = tp->rx_std_buffers[desc_idx].skb;
3482 post_ptr = &tp->rx_std_ptr;
3483 rx_std_posted++;
3484 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
3485 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
3486 mapping);
3487 skb = tp->rx_jumbo_buffers[desc_idx].skb;
3488 post_ptr = &tp->rx_jumbo_ptr;
3489 		} else {
3491 goto next_pkt_nopost;
3492 }
3493
3494 work_mask |= opaque_key;
3495
3496 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
3497 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
3498 drop_it:
3499 tg3_recycle_rx(tp, opaque_key,
3500 desc_idx, *post_ptr);
3501 drop_it_no_recycle:
3502 			/* The other statistics are kept track of by the card. */
3503 tp->net_stats.rx_dropped++;
3504 goto next_pkt;
3505 }
3506
3507 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
3508
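		/* Two receive strategies: a large packet is handed up in
		 * its original buffer and a fresh buffer is allocated for
		 * the ring (a zero-copy "flip"); a small packet is copied
		 * into a new skb and its buffer is recycled to the ring.
		 */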
3509 if (len > RX_COPY_THRESHOLD
3510 && tp->rx_offset == 2
3511 /* rx_offset != 2 iff this is a 5701 card running
3512 * in PCI-X mode [see tg3_get_invariants()] */
3513 ) {
3514 int skb_size;
3515
3516 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
3517 desc_idx, *post_ptr);
3518 if (skb_size < 0)
3519 goto drop_it;
3520
3521 pci_unmap_single(tp->pdev, dma_addr,
3522 skb_size - tp->rx_offset,
3523 PCI_DMA_FROMDEVICE);
3524
3525 skb_put(skb, len);
3526 } else {
3527 struct sk_buff *copy_skb;
3528
3529 tg3_recycle_rx(tp, opaque_key,
3530 desc_idx, *post_ptr);
3531
3532 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
3533 if (copy_skb == NULL)
3534 goto drop_it_no_recycle;
3535
3536 skb_reserve(copy_skb, 2);
3537 skb_put(copy_skb, len);
3538 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3539 skb_copy_from_linear_data(skb, copy_skb->data, len);
3540 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
3541
3542 /* We'll reuse the original ring buffer. */
3543 skb = copy_skb;
3544 }
3545
3546 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
3547 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
3548 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
3549 >> RXD_TCPCSUM_SHIFT) == 0xffff))
3550 skb->ip_summed = CHECKSUM_UNNECESSARY;
3551 else
3552 skb->ip_summed = CHECKSUM_NONE;
3553
3554 skb->protocol = eth_type_trans(skb, tp->dev);
3555 #if TG3_VLAN_TAG_USED
3556 if (tp->vlgrp != NULL &&
3557 desc->type_flags & RXD_FLAG_VLAN) {
3558 tg3_vlan_rx(tp, skb,
3559 desc->err_vlan & RXD_VLAN_MASK);
3560 } else
3561 #endif
3562 netif_receive_skb(skb);
3563
3564 tp->dev->last_rx = jiffies;
3565 received++;
3566 budget--;
3567
3568 next_pkt:
3569 (*post_ptr)++;
3570
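		/* If many std-ring buffers were consumed this pass, kick
		 * the producer mailbox now instead of waiting for the end
		 * of the poll, so the chip does not run out of buffers.
		 */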
3571 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
3572 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
3573
3574 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
3575 TG3_64BIT_REG_LOW, idx);
3576 work_mask &= ~RXD_OPAQUE_RING_STD;
3577 rx_std_posted = 0;
3578 }
3579 next_pkt_nopost:
3580 sw_idx++;
3581 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
3582
3583 /* Refresh hw_idx to see if there is new work */
3584 if (sw_idx == hw_idx) {
3585 hw_idx = tp->hw_status->idx[0].rx_producer;
3586 rmb();
3587 }
3588 }
3589
3590 /* ACK the status ring. */
3591 tp->rx_rcb_ptr = sw_idx;
3592 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
3593
3594 /* Refill RX ring(s). */
3595 if (work_mask & RXD_OPAQUE_RING_STD) {
3596 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
3597 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
3598 sw_idx);
3599 }
3600 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
3601 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
3602 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
3603 sw_idx);
3604 }
3605 mmiowb();
3606
3607 return received;
3608 }
3609
3610 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
3611 {
3612 struct tg3_hw_status *sblk = tp->hw_status;
3613
3614 /* handle link change and other phy events */
3615 if (!(tp->tg3_flags &
3616 (TG3_FLAG_USE_LINKCHG_REG |
3617 TG3_FLAG_POLL_SERDES))) {
3618 if (sblk->status & SD_STATUS_LINK_CHG) {
3619 sblk->status = SD_STATUS_UPDATED |
3620 (sblk->status & ~SD_STATUS_LINK_CHG);
3621 spin_lock(&tp->lock);
3622 tg3_setup_phy(tp, 0);
3623 spin_unlock(&tp->lock);
3624 }
3625 }
3626
3627 /* run TX completion thread */
3628 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
3629 tg3_tx(tp);
3630 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3631 return work_done;
3632 }
3633
3634 /* run RX thread, within the bounds set by NAPI.
3635 * All RX "locking" is done by ensuring outside
3636 * code synchronizes with tg3->napi.poll()
3637 */
3638 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
3639 work_done += tg3_rx(tp, budget - work_done);
3640
3641 return work_done;
3642 }
3643
3644 static int tg3_poll(struct napi_struct *napi, int budget)
3645 {
3646 struct tg3 *tp = container_of(napi, struct tg3, napi);
3647 int work_done = 0;
3648 struct tg3_hw_status *sblk = tp->hw_status;
3649
3650 while (1) {
3651 work_done = tg3_poll_work(tp, work_done, budget);
3652
3653 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
3654 goto tx_recovery;
3655
3656 if (unlikely(work_done >= budget))
3657 break;
3658
3659 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
3660 /* tp->last_tag is used in tg3_restart_ints() below
3661 * to tell the hw how much work has been processed,
3662 * so we must read it before checking for more work.
3663 */
3664 tp->last_tag = sblk->status_tag;
3665 rmb();
3666 } else
3667 sblk->status &= ~SD_STATUS_UPDATED;
3668
3669 if (likely(!tg3_has_work(tp))) {
3670 netif_rx_complete(tp->dev, napi);
3671 tg3_restart_ints(tp);
3672 break;
3673 }
3674 }
3675
3676 return work_done;
3677
3678 tx_recovery:
3679 /* work_done is guaranteed to be less than budget. */
3680 netif_rx_complete(tp->dev, napi);
3681 schedule_work(&tp->reset_task);
3682 return work_done;
3683 }
3684
3685 static void tg3_irq_quiesce(struct tg3 *tp)
3686 {
3687 BUG_ON(tp->irq_sync);
3688
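	/* Publish irq_sync before waiting: the barrier makes the flag
	 * visible to the IRQ handlers (which bail via tg3_irq_sync()),
	 * and synchronize_irq() then waits out any handler already
	 * running.
	 */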
3689 tp->irq_sync = 1;
3690 smp_mb();
3691
3692 synchronize_irq(tp->pdev->irq);
3693 }
3694
3695 static inline int tg3_irq_sync(struct tg3 *tp)
3696 {
3697 return tp->irq_sync;
3698 }
3699
3700 /* Fully shut down all tg3 driver activity elsewhere in the system.
3701  * If irq_sync is non-zero, the IRQ handler must also be synchronized
3702  * with.  Most of the time this is not necessary, except when
3703 * shutting down the device.
3704 */
3705 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
3706 {
3707 spin_lock_bh(&tp->lock);
3708 if (irq_sync)
3709 tg3_irq_quiesce(tp);
3710 }
3711
3712 static inline void tg3_full_unlock(struct tg3 *tp)
3713 {
3714 spin_unlock_bh(&tp->lock);
3715 }
3716
3717 /* One-shot MSI handler - the chip automatically disables the
3718  * interrupt after sending the MSI, so the driver doesn't have to.
3719 */
3720 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
3721 {
3722 struct net_device *dev = dev_id;
3723 struct tg3 *tp = netdev_priv(dev);
3724
3725 prefetch(tp->hw_status);
3726 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3727
3728 if (likely(!tg3_irq_sync(tp)))
3729 netif_rx_schedule(dev, &tp->napi);
3730
3731 return IRQ_HANDLED;
3732 }
3733
3734 /* MSI ISR - No need to check for interrupt sharing and no need to
3735 * flush status block and interrupt mailbox. PCI ordering rules
3736 * guarantee that MSI will arrive after the status block.
3737 */
3738 static irqreturn_t tg3_msi(int irq, void *dev_id)
3739 {
3740 struct net_device *dev = dev_id;
3741 struct tg3 *tp = netdev_priv(dev);
3742
3743 prefetch(tp->hw_status);
3744 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3745 /*
3746 * Writing any value to intr-mbox-0 clears PCI INTA# and
3747 * chip-internal interrupt pending events.
3748 * Writing non-zero to intr-mbox-0 additionally tells the
3749 * NIC to stop sending us irqs, engaging "in-intr-handler"
3750 * event coalescing.
3751 */
3752 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3753 if (likely(!tg3_irq_sync(tp)))
3754 netif_rx_schedule(dev, &tp->napi);
3755
3756 return IRQ_RETVAL(1);
3757 }
3758
3759 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
3760 {
3761 struct net_device *dev = dev_id;
3762 struct tg3 *tp = netdev_priv(dev);
3763 struct tg3_hw_status *sblk = tp->hw_status;
3764 unsigned int handled = 1;
3765
3766 /* In INTx mode, it is possible for the interrupt to arrive at
3767 * the CPU before the status block that was posted just prior to
3768 * it.  Reading the PCI State register will confirm whether the
3769 * interrupt is ours and will flush the status block.
3770 */
3771 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
3772 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3773 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3774 handled = 0;
3775 goto out;
3776 }
3777 }
3778
3779 /*
3780 * Writing any value to intr-mbox-0 clears PCI INTA# and
3781 * chip-internal interrupt pending events.
3782 * Writing non-zero to intr-mbox-0 additionally tells the
3783 * NIC to stop sending us irqs, engaging "in-intr-handler"
3784 * event coalescing.
3785 *
3786 * Flush the mailbox to de-assert the IRQ immediately to prevent
3787 * spurious interrupts. The flush impacts performance but
3788 * excessive spurious interrupts can be worse in some cases.
3789 */
3790 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3791 if (tg3_irq_sync(tp))
3792 goto out;
3793 sblk->status &= ~SD_STATUS_UPDATED;
3794 if (likely(tg3_has_work(tp))) {
3795 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3796 netif_rx_schedule(dev, &tp->napi);
3797 } else {
3798 /* No work, shared interrupt perhaps? re-enable
3799 * interrupts, and flush that PCI write
3800 */
3801 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
3802 0x00000000);
3803 }
3804 out:
3805 return IRQ_RETVAL(handled);
3806 }
3807
3808 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
3809 {
3810 struct net_device *dev = dev_id;
3811 struct tg3 *tp = netdev_priv(dev);
3812 struct tg3_hw_status *sblk = tp->hw_status;
3813 unsigned int handled = 1;
3814
3815 /* In INTx mode, it is possible for the interrupt to arrive at
3816 * the CPU before the status block that was posted just prior to
3817 * it.  Reading the PCI State register will confirm whether the
3818 * interrupt is ours and will flush the status block.
3819 */
3820 if (unlikely(sblk->status_tag == tp->last_tag)) {
3821 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
3822 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3823 handled = 0;
3824 goto out;
3825 }
3826 }
3827
3828 /*
3829 * Writing any value to intr-mbox-0 clears PCI INTA# and
3830 * chip-internal interrupt pending events.
3831 * Writing non-zero to intr-mbox-0 additionally tells the
3832 * NIC to stop sending us irqs, engaging "in-intr-handler"
3833 * event coalescing.
3834 *
3835 * Flush the mailbox to de-assert the IRQ immediately to prevent
3836 * spurious interrupts. The flush impacts performance but
3837 * excessive spurious interrupts can be worse in some cases.
3838 */
3839 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3840 if (tg3_irq_sync(tp))
3841 goto out;
3842 if (netif_rx_schedule_prep(dev, &tp->napi)) {
3843 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3844 /* Update last_tag to mark that this status has been
3845 * seen. Because the interrupt may be shared, we may be
3846 * racing with tg3_poll(), so only update last_tag
3847 * if tg3_poll() is not scheduled.
3848 */
3849 tp->last_tag = sblk->status_tag;
3850 __netif_rx_schedule(dev, &tp->napi);
3851 }
3852 out:
3853 return IRQ_RETVAL(handled);
3854 }
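/* In tagged mode the handler above never clears SD_STATUS_UPDATED;
 * progress is tracked purely through status_tag.  tp->last_tag
 * records the tag the driver has seen so that the polling path can
 * later acknowledge exactly the work it has processed.
 */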
3855
3856 /* ISR for interrupt test */
3857 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3858 {
3859 struct net_device *dev = dev_id;
3860 struct tg3 *tp = netdev_priv(dev);
3861 struct tg3_hw_status *sblk = tp->hw_status;
3862
3863 if ((sblk->status & SD_STATUS_UPDATED) ||
3864 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3865 tg3_disable_ints(tp);
3866 return IRQ_RETVAL(1);
3867 }
3868 return IRQ_RETVAL(0);
3869 }
3870
3871 static int tg3_init_hw(struct tg3 *, int);
3872 static int tg3_halt(struct tg3 *, int, int);
3873
3874 /* Restart hardware after configuration changes, self-test, etc.
3875 * Invoked with tp->lock held.
3876 */
3877 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
3878 {
3879 int err;
3880
3881 err = tg3_init_hw(tp, reset_phy);
3882 if (err) {
3883 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
3884 "aborting.\n", tp->dev->name);
3885 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
3886 tg3_full_unlock(tp);
3887 del_timer_sync(&tp->timer);
3888 tp->irq_sync = 0;
3889 napi_enable(&tp->napi);
3890 dev_close(tp->dev);
3891 tg3_full_lock(tp, 0);
3892 }
3893 return err;
3894 }
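/* Note the error path above: dev_close() cannot run with tp->lock
 * held, so the lock is dropped around it and re-taken before
 * returning -- callers of tg3_restart_hw() expect to still hold the
 * lock whether or not the re-init succeeded.
 */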
3895
3896 #ifdef CONFIG_NET_POLL_CONTROLLER
3897 static void tg3_poll_controller(struct net_device *dev)
3898 {
3899 struct tg3 *tp = netdev_priv(dev);
3900
3901 tg3_interrupt(tp->pdev->irq, dev);
3902 }
3903 #endif
3904
3905 static void tg3_reset_task(struct work_struct *work)
3906 {
3907 struct tg3 *tp = container_of(work, struct tg3, reset_task);
3908 unsigned int restart_timer;
3909
3910 tg3_full_lock(tp, 0);
3911
3912 if (!netif_running(tp->dev)) {
3913 tg3_full_unlock(tp);
3914 return;
3915 }
3916
3917 tg3_full_unlock(tp);
3918
3919 tg3_netif_stop(tp);
3920
3921 tg3_full_lock(tp, 1);
3922
3923 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
3924 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
3925
3926 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
3927 tp->write32_tx_mbox = tg3_write32_tx_mbox;
3928 tp->write32_rx_mbox = tg3_write_flush_reg32;
3929 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
3930 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
3931 }
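/* The swap above reverts the recovered device to flushed (read-back)
 * mailbox writes, on the theory that write reordering may have
 * contributed to the TX hang that brought us here.
 */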
3932
3933 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
3934 if (tg3_init_hw(tp, 1))
3935 goto out;
3936
3937 tg3_netif_start(tp);
3938
3939 if (restart_timer)
3940 mod_timer(&tp->timer, jiffies + 1);
3941
3942 out:
3943 tg3_full_unlock(tp);
3944 }
3945
3946 static void tg3_dump_short_state(struct tg3 *tp)
3947 {
3948 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
3949 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
3950 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
3951 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
3952 }
3953
3954 static void tg3_tx_timeout(struct net_device *dev)
3955 {
3956 struct tg3 *tp = netdev_priv(dev);
3957
3958 if (netif_msg_tx_err(tp)) {
3959 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
3960 dev->name);
3961 tg3_dump_short_state(tp);
3962 }
3963
3964 schedule_work(&tp->reset_task);
3965 }
3966
3967 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
3968 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3969 {
3970 u32 base = (u32) mapping & 0xffffffff;
3971
3972 return ((base > 0xffffdcc0) &&
3973 (base + len + 8 < base));
3974 }
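/* A worked example of the wrap test above, kept as documentation only
 * (the addresses used are illustrative, not from any hardware manual):
 * 0xffffdcc0 sits 9024 bytes below the 4GB mark, so a buffer starting
 * at or below it cannot cross the boundary even with a jumbo-sized
 * payload plus the 8-byte slop in the test.
 */
#if 0 /* Illustrative self-check, not built. */
static void tg3_4g_overflow_example(void)
{
	/* 0xffffff00 + 0x200 + 8 wraps to 0x108 in 32-bit arithmetic,
	 * i.e. the buffer straddles a 4GB boundary.
	 */
	BUG_ON(!tg3_4g_overflow_test(0xffffff00, 0x200));
	/* A mapping that ends well below the boundary passes. */
	BUG_ON(tg3_4g_overflow_test(0x00001000, 0x200));
}
#endif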
3975
3976 /* Test for DMA addresses > 40-bit */
3977 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3978 int len)
3979 {
3980 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3981 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3982 return (((u64) mapping + len) > DMA_40BIT_MASK);
3983 return 0;
3984 #else
3985 return 0;
3986 #endif
3987 }
3988
3989 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3990
3991 /* Work around 4GB and 40-bit hardware DMA bugs. */
3992 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3993 u32 last_plus_one, u32 *start,
3994 u32 base_flags, u32 mss)
3995 {
3996 struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3997 dma_addr_t new_addr = 0;
3998 u32 entry = *start;
3999 int i, ret = 0;
4000
4001 if (!new_skb) {
4002 ret = -1;
4003 } else {
4004 /* New SKB is guaranteed to be linear. */
4005 entry = *start;
4006 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
4007 PCI_DMA_TODEVICE);
4008 /* Make sure new skb does not cross any 4G boundaries.
4009 * Drop the packet if it does.
4010 */
4011 if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
4012 ret = -1;
4013 dev_kfree_skb(new_skb);
4014 new_skb = NULL;
4015 } else {
4016 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4017 base_flags, 1 | (mss << 1));
4018 *start = NEXT_TX(entry);
4019 }
4020 }
4021
4022 /* Now clean up the sw ring entries. */
4023 i = 0;
4024 while (entry != last_plus_one) {
4025 int len;
4026
4027 if (i == 0)
4028 len = skb_headlen(skb);
4029 else
4030 len = skb_shinfo(skb)->frags[i-1].size;
4031 pci_unmap_single(tp->pdev,
4032 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
4033 len, PCI_DMA_TODEVICE);
4034 if (i == 0) {
4035 tp->tx_buffers[entry].skb = new_skb;
4036 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
4037 } else {
4038 tp->tx_buffers[entry].skb = NULL;
4039 }
4040 entry = NEXT_TX(entry);
4041 i++;
4042 }
4043
4044 dev_kfree_skb(skb);
4045
4046 return ret;
4047 }
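/* The workaround above is essentially "linearize and retry": copy the
 * offending skb into a single freshly allocated buffer, point one
 * descriptor at it, and unmap every ring entry the original
 * multi-fragment attempt had consumed.  If even the fresh mapping
 * crosses a 4GB boundary, the packet is silently dropped rather than
 * retried again.
 */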
4048
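/* tg3_set_txd() packs one hardware TX descriptor.  mss_and_is_end
 * multiplexes two values: bit 0 marks the final fragment of the skb
 * (it becomes TXD_FLAG_END) and bits 1 and up carry the TSO MSS.
 * When TXD_FLAG_VLAN is set, the VLAN tag rides in the upper 16 bits
 * of flags and ends up sharing the descriptor's vlan_tag word with
 * the MSS.
 */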
4049 static void tg3_set_txd(struct tg3 *tp, int entry,
4050 dma_addr_t mapping, int len, u32 flags,
4051 u32 mss_and_is_end)
4052 {
4053 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4054 int is_end = (mss_and_is_end & 0x1);
4055 u32 mss = (mss_and_is_end >> 1);
4056 u32 vlan_tag = 0;
4057
4058 if (is_end)
4059 flags |= TXD_FLAG_END;
4060 if (flags & TXD_FLAG_VLAN) {
4061 vlan_tag = flags >> 16;
4062 flags &= 0xffff;
4063 }
4064 vlan_tag |= (mss << TXD_MSS_SHIFT);
4065
4066 txd->addr_hi = ((u64) mapping >> 32);
4067 txd->addr_lo = ((u64) mapping & 0xffffffff);
4068 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4069 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4070 }
4071
4072 /* hard_start_xmit for devices that don't have any bugs and
4073 * support TG3_FLG2_HW_TSO_2 only.
4074 */
4075 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4076 {
4077 struct tg3 *tp = netdev_priv(dev);
4078 dma_addr_t mapping;
4079 u32 len, entry, base_flags, mss;
4080
4081 len = skb_headlen(skb);
4082
4083 /* We are running in BH disabled context with netif_tx_lock
4084 * and TX reclaim runs via tp->napi.poll inside of a software
4085 * interrupt. Furthermore, IRQ processing runs lockless so we have
4086 * no IRQ context deadlocks to worry about either. Rejoice!
4087 */
4088 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4089 if (!netif_queue_stopped(dev)) {
4090 netif_stop_queue(dev);
4091
4092 /* This is a hard error, log it. */
4093 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4094 "queue awake!\n", dev->name);
4095 }
4096 return NETDEV_TX_BUSY;
4097 }
4098
4099 entry = tp->tx_prod;
4100 base_flags = 0;
4101 mss = 0;
4102 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4103 int tcp_opt_len, ip_tcp_len;
4104
4105 if (skb_header_cloned(skb) &&
4106 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4107 dev_kfree_skb(skb);
4108 goto out_unlock;
4109 }
4110
4111 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4112 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4113 else {
4114 struct iphdr *iph = ip_hdr(skb);
4115
4116 tcp_opt_len = tcp_optlen(skb);
4117 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4118
4119 iph->check = 0;
4120 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4121 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4122 }
4123
4124 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4125 TXD_FLAG_CPU_POST_DMA);
4126
4127 tcp_hdr(skb)->check = 0;
4128
4129 }
4130 else if (skb->ip_summed == CHECKSUM_PARTIAL)
4131 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4132 #if TG3_VLAN_TAG_USED
4133 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4134 base_flags |= (TXD_FLAG_VLAN |
4135 (vlan_tx_tag_get(skb) << 16));
4136 #endif
4137
4138 /* Queue skb data, a.k.a. the main skb fragment. */
4139 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4140
4141 tp->tx_buffers[entry].skb = skb;
4142 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4143
4144 tg3_set_txd(tp, entry, mapping, len, base_flags,
4145 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4146
4147 entry = NEXT_TX(entry);
4148
4149 /* Now loop through additional data fragments, and queue them. */
4150 if (skb_shinfo(skb)->nr_frags > 0) {
4151 unsigned int i, last;
4152
4153 last = skb_shinfo(skb)->nr_frags - 1;
4154 for (i = 0; i <= last; i++) {
4155 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4156
4157 len = frag->size;
4158 mapping = pci_map_page(tp->pdev,
4159 frag->page,
4160 frag->page_offset,
4161 len, PCI_DMA_TODEVICE);
4162
4163 tp->tx_buffers[entry].skb = NULL;
4164 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4165
4166 tg3_set_txd(tp, entry, mapping, len,
4167 base_flags, (i == last) | (mss << 1));
4168
4169 entry = NEXT_TX(entry);
4170 }
4171 }
4172
4173 /* Packets are ready, update Tx producer idx local and on card. */
4174 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4175
4176 tp->tx_prod = entry;
4177 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4178 netif_stop_queue(dev);
4179 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4180 netif_wake_queue(tp->dev);
4181 }
4182
4183 out_unlock:
4184 mmiowb();
4185
4186 dev->trans_start = jiffies;
4187
4188 return NETDEV_TX_OK;
4189 }
4190
4191 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4192
4193 /* Use GSO to work around a rare TSO bug that may be triggered when the
4194 * TSO header is greater than 80 bytes.
4195 */
4196 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4197 {
4198 struct sk_buff *segs, *nskb;
4199
4200 /* Estimate the number of fragments in the worst case */
4201 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4202 netif_stop_queue(tp->dev);
4203 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4204 return NETDEV_TX_BUSY;
4205
4206 netif_wake_queue(tp->dev);
4207 }
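/* Stop-then-recheck above is the standard race-free pattern: TX
 * reclaim can free entries concurrently, so re-testing after
 * netif_stop_queue() ensures the queue is not left stopped forever
 * when space appeared between the two checks.
 */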
4208
4209 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4210 if (unlikely(IS_ERR(segs)))
4211 goto tg3_tso_bug_end;
4212
4213 do {
4214 nskb = segs;
4215 segs = segs->next;
4216 nskb->next = NULL;
4217 tg3_start_xmit_dma_bug(nskb, tp->dev);
4218 } while (segs);
4219
4220 tg3_tso_bug_end:
4221 dev_kfree_skb(skb);
4222
4223 return NETDEV_TX_OK;
4224 }
4225
4226 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4227 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4228 */
4229 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4230 {
4231 struct tg3 *tp = netdev_priv(dev);
4232 dma_addr_t mapping;
4233 u32 len, entry, base_flags, mss;
4234 int would_hit_hwbug;
4235
4236 len = skb_headlen(skb);
4237
4238 /* We are running in BH disabled context with netif_tx_lock
4239 * and TX reclaim runs via tp->napi.poll inside of a software
4240 * interrupt. Furthermore, IRQ processing runs lockless so we have
4241 * no IRQ context deadlocks to worry about either. Rejoice!
4242 */
4243 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4244 if (!netif_queue_stopped(dev)) {
4245 netif_stop_queue(dev);
4246
4247 /* This is a hard error, log it. */
4248 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4249 "queue awake!\n", dev->name);
4250 }
4251 return NETDEV_TX_BUSY;
4252 }
4253
4254 entry = tp->tx_prod;
4255 base_flags = 0;
4256 if (skb->ip_summed == CHECKSUM_PARTIAL)
4257 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4258 mss = 0;
4259 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4260 struct iphdr *iph;
4261 int tcp_opt_len, ip_tcp_len, hdr_len;
4262
4263 if (skb_header_cloned(skb) &&
4264 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4265 dev_kfree_skb(skb);
4266 goto out_unlock;
4267 }
4268
4269 tcp_opt_len = tcp_optlen(skb);
4270 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4271
4272 hdr_len = ip_tcp_len + tcp_opt_len;
4273 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
4274 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
4275 return (tg3_tso_bug(tp, skb));
4276
4277 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4278 TXD_FLAG_CPU_POST_DMA);
4279
4280 iph = ip_hdr(skb);
4281 iph->check = 0;
4282 iph->tot_len = htons(mss + hdr_len);
4283 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
4284 tcp_hdr(skb)->check = 0;
4285 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
4286 } else
4287 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4288 iph->daddr, 0,
4289 IPPROTO_TCP,
4290 0);
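/* For firmware/legacy TSO the TCP checksum must be seeded with the
 * complement of the pseudo-header sum computed over a zero length,
 * which the chip then extends per generated segment; hardware TSO
 * computes the whole checksum itself, so that branch clears the seed
 * and the TCPUDP_CSUM flag instead.
 */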
4291
4292 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
4293 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
4294 if (tcp_opt_len || iph->ihl > 5) {
4295 int tsflags;
4296
4297 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4298 mss |= (tsflags << 11);
4299 }
4300 } else {
4301 if (tcp_opt_len || iph->ihl > 5) {
4302 int tsflags;
4303
4304 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
4305 base_flags |= tsflags << 12;
4306 }
4307 }
4308 }
4309 #if TG3_VLAN_TAG_USED
4310 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4311 base_flags |= (TXD_FLAG_VLAN |
4312 (vlan_tx_tag_get(skb) << 16));
4313 #endif
4314
4315 /* Queue skb data, a.k.a. the main skb fragment. */
4316 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4317
4318 tp->tx_buffers[entry].skb = skb;
4319 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4320
4321 would_hit_hwbug = 0;
4322
4323 if (tg3_4g_overflow_test(mapping, len))
4324 would_hit_hwbug = 1;
4325
4326 tg3_set_txd(tp, entry, mapping, len, base_flags,
4327 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4328
4329 entry = NEXT_TX(entry);
4330
4331 /* Now loop through additional data fragments, and queue them. */
4332 if (skb_shinfo(skb)->nr_frags > 0) {
4333 unsigned int i, last;
4334
4335 last = skb_shinfo(skb)->nr_frags - 1;
4336 for (i = 0; i <= last; i++) {
4337 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4338
4339 len = frag->size;
4340 mapping = pci_map_page(tp->pdev,
4341 frag->page,
4342 frag->page_offset,
4343 len, PCI_DMA_TODEVICE);
4344
4345 tp->tx_buffers[entry].skb = NULL;
4346 pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
4347
4348 if (tg3_4g_overflow_test(mapping, len))
4349 would_hit_hwbug = 1;
4350
4351 if (tg3_40bit_overflow_test(tp, mapping, len))
4352 would_hit_hwbug = 1;
4353
4354 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4355 tg3_set_txd(tp, entry, mapping, len,
4356 base_flags, (i == last)|(mss << 1));
4357 else
4358 tg3_set_txd(tp, entry, mapping, len,
4359 base_flags, (i == last));
4360
4361 entry = NEXT_TX(entry);
4362 }
4363 }
4364
4365 if (would_hit_hwbug) {
4366 u32 last_plus_one = entry;
4367 u32 start;
4368
4369 start = entry - 1 - skb_shinfo(skb)->nr_frags;
4370 start &= (TG3_TX_RING_SIZE - 1);
4371
4372 /* If the workaround fails due to memory/mapping
4373 * failure, silently drop this packet.
4374 */
4375 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
4376 &start, base_flags, mss))
4377 goto out_unlock;
4378
4379 entry = start;
4380 }
4381
4382 /* Packets are ready, update Tx producer idx local and on card. */
4383 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4384
4385 tp->tx_prod = entry;
4386 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4387 netif_stop_queue(dev);
4388 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4389 netif_wake_queue(tp->dev);
4390 }
4391
4392 out_unlock:
4393 mmiowb();
4394
4395 dev->trans_start = jiffies;
4396
4397 return NETDEV_TX_OK;
4398 }
4399
4400 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4401 int new_mtu)
4402 {
4403 dev->mtu = new_mtu;
4404
4405 if (new_mtu > ETH_DATA_LEN) {
4406 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4407 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4408 ethtool_op_set_tso(dev, 0);
4409 }
4410 else
4411 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4412 } else {
4413 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4414 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4415 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4416 }
4417 }
4418
4419 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4420 {
4421 struct tg3 *tp = netdev_priv(dev);
4422 int err;
4423
4424 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4425 return -EINVAL;
4426
4427 if (!netif_running(dev)) {
4428 /* We'll just catch it later when the
4429 * device is brought up.
4430 */
4431 tg3_set_mtu(dev, tp, new_mtu);
4432 return 0;
4433 }
4434
4435 tg3_netif_stop(tp);
4436
4437 tg3_full_lock(tp, 1);
4438
4439 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4440
4441 tg3_set_mtu(dev, tp, new_mtu);
4442
4443 err = tg3_restart_hw(tp, 0);
4444
4445 if (!err)
4446 tg3_netif_start(tp);
4447
4448 tg3_full_unlock(tp);
4449
4450 return err;
4451 }
4452
4453 /* Free up pending packets in all rx/tx rings.
4454 *
4455 * The chip has been shut down and the driver detached from
4456 * the networking core, so no interrupts or new tx packets will
4457 * end up in the driver. tp->{tx,}lock is not held and we are not
4458 * in an interrupt context and thus may sleep.
4459 */
4460 static void tg3_free_rings(struct tg3 *tp)
4461 {
4462 struct ring_info *rxp;
4463 int i;
4464
4465 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4466 rxp = &tp->rx_std_buffers[i];
4467
4468 if (rxp->skb == NULL)
4469 continue;
4470 pci_unmap_single(tp->pdev,
4471 pci_unmap_addr(rxp, mapping),
4472 tp->rx_pkt_buf_sz - tp->rx_offset,
4473 PCI_DMA_FROMDEVICE);
4474 dev_kfree_skb_any(rxp->skb);
4475 rxp->skb = NULL;
4476 }
4477
4478 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4479 rxp = &tp->rx_jumbo_buffers[i];
4480
4481 if (rxp->skb == NULL)
4482 continue;
4483 pci_unmap_single(tp->pdev,
4484 pci_unmap_addr(rxp, mapping),
4485 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
4486 PCI_DMA_FROMDEVICE);
4487 dev_kfree_skb_any(rxp->skb);
4488 rxp->skb = NULL;
4489 }
4490
4491 for (i = 0; i < TG3_TX_RING_SIZE; ) {
4492 struct tx_ring_info *txp;
4493 struct sk_buff *skb;
4494 int j;
4495
4496 txp = &tp->tx_buffers[i];
4497 skb = txp->skb;
4498
4499 if (skb == NULL) {
4500 i++;
4501 continue;
4502 }
4503
4504 pci_unmap_single(tp->pdev,
4505 pci_unmap_addr(txp, mapping),
4506 skb_headlen(skb),
4507 PCI_DMA_TODEVICE);
4508 txp->skb = NULL;
4509
4510 i++;
4511
4512 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
4513 txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
4514 pci_unmap_page(tp->pdev,
4515 pci_unmap_addr(txp, mapping),
4516 skb_shinfo(skb)->frags[j].size,
4517 PCI_DMA_TODEVICE);
4518 i++;
4519 }
4520
4521 dev_kfree_skb_any(skb);
4522 }
4523 }
4524
4525 /* Initialize tx/rx rings for packet processing.
4526 *
4527 * The chip has been shut down and the driver detached from
4528 * the networking, so no interrupts or new tx packets will
4529 * end up in the driver. tp->{tx,}lock are held and thus
4530 * we may not sleep.
4531 */
4532 static int tg3_init_rings(struct tg3 *tp)
4533 {
4534 u32 i;
4535
4536 /* Free up all the SKBs. */
4537 tg3_free_rings(tp);
4538
4539 /* Zero out all descriptors. */
4540 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
4541 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
4542 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
4543 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
4544
4545 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
4546 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
4547 (tp->dev->mtu > ETH_DATA_LEN))
4548 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
4549
4550 /* Initialize invariants of the rings; we only set this
4551 * stuff once. This works because the card does not
4552 * write into the rx buffer posting rings.
4553 */
4554 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
4555 struct tg3_rx_buffer_desc *rxd;
4556
4557 rxd = &tp->rx_std[i];
4558 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
4559 << RXD_LEN_SHIFT;
4560 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
4561 rxd->opaque = (RXD_OPAQUE_RING_STD |
4562 (i << RXD_OPAQUE_INDEX_SHIFT));
4563 }
4564
4565 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4566 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
4567 struct tg3_rx_buffer_desc *rxd;
4568
4569 rxd = &tp->rx_jumbo[i];
4570 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
4571 << RXD_LEN_SHIFT;
4572 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
4573 RXD_FLAG_JUMBO;
4574 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
4575 (i << RXD_OPAQUE_INDEX_SHIFT));
4576 }
4577 }
4578
4579 /* Now allocate fresh SKBs for each rx ring. */
4580 for (i = 0; i < tp->rx_pending; i++) {
4581 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
4582 printk(KERN_WARNING PFX
4583 "%s: Using a smaller RX standard ring, "
4584 "only %d out of %d buffers were allocated "
4585 "successfully.\n",
4586 tp->dev->name, i, tp->rx_pending);
4587 if (i == 0)
4588 return -ENOMEM;
4589 tp->rx_pending = i;
4590 break;
4591 }
4592 }
4593
4594 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
4595 for (i = 0; i < tp->rx_jumbo_pending; i++) {
4596 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
4597 -1, i) < 0) {
4598 printk(KERN_WARNING PFX
4599 "%s: Using a smaller RX jumbo ring, "
4600 "only %d out of %d buffers were "
4601 "allocated successfully.\n",
4602 tp->dev->name, i, tp->rx_jumbo_pending);
4603 if (i == 0) {
4604 tg3_free_rings(tp);
4605 return -ENOMEM;
4606 }
4607 tp->rx_jumbo_pending = i;
4608 break;
4609 }
4610 }
4611 }
4612 return 0;
4613 }
4614
4615 /*
4616 * Must not be invoked with interrupt sources disabled and
4617 * the hardware shut down.
4618 */
4619 static void tg3_free_consistent(struct tg3 *tp)
4620 {
4621 kfree(tp->rx_std_buffers);
4622 tp->rx_std_buffers = NULL;
4623 if (tp->rx_std) {
4624 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4625 tp->rx_std, tp->rx_std_mapping);
4626 tp->rx_std = NULL;
4627 }
4628 if (tp->rx_jumbo) {
4629 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4630 tp->rx_jumbo, tp->rx_jumbo_mapping);
4631 tp->rx_jumbo = NULL;
4632 }
4633 if (tp->rx_rcb) {
4634 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4635 tp->rx_rcb, tp->rx_rcb_mapping);
4636 tp->rx_rcb = NULL;
4637 }
4638 if (tp->tx_ring) {
4639 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4640 tp->tx_ring, tp->tx_desc_mapping);
4641 tp->tx_ring = NULL;
4642 }
4643 if (tp->hw_status) {
4644 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4645 tp->hw_status, tp->status_mapping);
4646 tp->hw_status = NULL;
4647 }
4648 if (tp->hw_stats) {
4649 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4650 tp->hw_stats, tp->stats_mapping);
4651 tp->hw_stats = NULL;
4652 }
4653 }
4654
4655 /*
4656 * Must not be invoked with interrupt sources disabled and
4657 * the hardware shut down. Can sleep.
4658 */
4659 static int tg3_alloc_consistent(struct tg3 *tp)
4660 {
4661 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4662 (TG3_RX_RING_SIZE +
4663 TG3_RX_JUMBO_RING_SIZE)) +
4664 (sizeof(struct tx_ring_info) *
4665 TG3_TX_RING_SIZE),
4666 GFP_KERNEL);
4667 if (!tp->rx_std_buffers)
4668 return -ENOMEM;
4669
4670 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4671 tp->tx_buffers = (struct tx_ring_info *)
4672 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
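/* Note: one kzalloc above backs all three shadow arrays -- the jumbo
 * ring_info entries start right after the std ring's, and the
 * tx_ring_info entries follow the jumbo ones.
 */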
4673
4674 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4675 &tp->rx_std_mapping);
4676 if (!tp->rx_std)
4677 goto err_out;
4678
4679 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4680 &tp->rx_jumbo_mapping);
4681
4682 if (!tp->rx_jumbo)
4683 goto err_out;
4684
4685 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4686 &tp->rx_rcb_mapping);
4687 if (!tp->rx_rcb)
4688 goto err_out;
4689
4690 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4691 &tp->tx_desc_mapping);
4692 if (!tp->tx_ring)
4693 goto err_out;
4694
4695 tp->hw_status = pci_alloc_consistent(tp->pdev,
4696 TG3_HW_STATUS_SIZE,
4697 &tp->status_mapping);
4698 if (!tp->hw_status)
4699 goto err_out;
4700
4701 tp->hw_stats = pci_alloc_consistent(tp->pdev,
4702 sizeof(struct tg3_hw_stats),
4703 &tp->stats_mapping);
4704 if (!tp->hw_stats)
4705 goto err_out;
4706
4707 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4708 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4709
4710 return 0;
4711
4712 err_out:
4713 tg3_free_consistent(tp);
4714 return -ENOMEM;
4715 }
4716
4717 #define MAX_WAIT_CNT 1000
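/* 1000 polls at 100us apiece gives each block roughly 100ms to
 * quiesce before tg3_stop_block() declares it stuck.
 */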
4718
4719 /* To stop a block, clear the enable bit and poll till it
4720 * clears. tp->lock is held.
4721 */
4722 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4723 {
4724 unsigned int i;
4725 u32 val;
4726
4727 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4728 switch (ofs) {
4729 case RCVLSC_MODE:
4730 case DMAC_MODE:
4731 case MBFREE_MODE:
4732 case BUFMGR_MODE:
4733 case MEMARB_MODE:
4734 /* We can't enable/disable these bits of the
4735 * 5705/5750, just say success.
4736 */
4737 return 0;
4738
4739 default:
4740 break;
4741 }
4742 }
4743
4744 val = tr32(ofs);
4745 val &= ~enable_bit;
4746 tw32_f(ofs, val);
4747
4748 for (i = 0; i < MAX_WAIT_CNT; i++) {
4749 udelay(100);
4750 val = tr32(ofs);
4751 if ((val & enable_bit) == 0)
4752 break;
4753 }
4754
4755 if (i == MAX_WAIT_CNT && !silent) {
4756 printk(KERN_ERR PFX "tg3_stop_block timed out, "
4757 "ofs=%lx enable_bit=%x\n",
4758 ofs, enable_bit);
4759 return -ENODEV;
4760 }
4761
4762 return 0;
4763 }
4764
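/* tg3_abort_hw() quiesces the chip in dependency order: receive-side
 * blocks are stopped first so no new work is generated, then the MAC
 * transmitter and send-side blocks, and finally the host-facing
 * blocks (coalescing, write DMA, buffer manager, memory arbiter).
 */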
4765 /* tp->lock is held. */
4766 static int tg3_abort_hw(struct tg3 *tp, int silent)
4767 {
4768 int i, err;
4769
4770 tg3_disable_ints(tp);
4771
4772 tp->rx_mode &= ~RX_MODE_ENABLE;
4773 tw32_f(MAC_RX_MODE, tp->rx_mode);
4774 udelay(10);
4775
4776 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
4777 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
4778 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
4779 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
4780 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
4781 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
4782
4783 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
4784 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
4785 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
4786 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
4787 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
4788 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
4789 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
4790
4791 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
4792 tw32_f(MAC_MODE, tp->mac_mode);
4793 udelay(40);
4794
4795 tp->tx_mode &= ~TX_MODE_ENABLE;
4796 tw32_f(MAC_TX_MODE, tp->tx_mode);
4797
4798 for (i = 0; i < MAX_WAIT_CNT; i++) {
4799 udelay(100);
4800 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
4801 break;
4802 }
4803 if (i >= MAX_WAIT_CNT) {
4804 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
4805 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
4806 tp->dev->name, tr32(MAC_TX_MODE));
4807 err |= -ENODEV;
4808 }
4809
4810 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
4811 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
4812 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
4813
4814 tw32(FTQ_RESET, 0xffffffff);
4815 tw32(FTQ_RESET, 0x00000000);
4816
4817 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
4818 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
4819
4820 if (tp->hw_status)
4821 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4822 if (tp->hw_stats)
4823 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4824
4825 return err;
4826 }
4827
4828 /* tp->lock is held. */
4829 static int tg3_nvram_lock(struct tg3 *tp)
4830 {
4831 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4832 int i;
4833
4834 if (tp->nvram_lock_cnt == 0) {
4835 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4836 for (i = 0; i < 8000; i++) {
4837 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4838 break;
4839 udelay(20);
4840 }
4841 if (i == 8000) {
4842 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4843 return -ENODEV;
4844 }
4845 }
4846 tp->nvram_lock_cnt++;
4847 }
4848 return 0;
4849 }
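/* The nvram lock is counted: nested tg3_nvram_lock() calls touch the
 * SWARB register only on the 0 -> 1 transition, and tg3_nvram_unlock()
 * drops the hardware arbitration grant only when the count returns to
 * zero.  tg3_chip_reset() zeroes the count outright because the reset
 * itself releases the hardware lock.  A minimal sketch of the nesting
 * behavior, assuming tp->lock is held and TG3_FLAG_NVRAM is set:
 */
#if 0 /* Illustrative only, not built. */
static void tg3_nvram_lock_nesting_example(struct tg3 *tp)
{
	tg3_nvram_lock(tp);	/* 0 -> 1: requests SWARB_REQ_SET1 */
	tg3_nvram_lock(tp);	/* 1 -> 2: count only */
	tg3_nvram_unlock(tp);	/* 2 -> 1: count only */
	tg3_nvram_unlock(tp);	/* 1 -> 0: releases via SWARB_REQ_CLR1 */
}
#endif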
4850
4851 /* tp->lock is held. */
4852 static void tg3_nvram_unlock(struct tg3 *tp)
4853 {
4854 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4855 if (tp->nvram_lock_cnt > 0)
4856 tp->nvram_lock_cnt--;
4857 if (tp->nvram_lock_cnt == 0)
4858 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4859 }
4860 }
4861
4862 /* tp->lock is held. */
4863 static void tg3_enable_nvram_access(struct tg3 *tp)
4864 {
4865 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4866 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4867 u32 nvaccess = tr32(NVRAM_ACCESS);
4868
4869 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4870 }
4871 }
4872
4873 /* tp->lock is held. */
4874 static void tg3_disable_nvram_access(struct tg3 *tp)
4875 {
4876 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4877 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4878 u32 nvaccess = tr32(NVRAM_ACCESS);
4879
4880 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4881 }
4882 }
4883
4884 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
4885 {
4886 int i;
4887 u32 apedata;
4888
4889 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
4890 if (apedata != APE_SEG_SIG_MAGIC)
4891 return;
4892
4893 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
4894 if (apedata != APE_FW_STATUS_READY)
4895 return;
4896
4897 /* Wait for up to 1 millisecond for the APE to service the previous event. */
4898 for (i = 0; i < 10; i++) {
4899 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
4900 return;
4901
4902 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
4903
4904 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4905 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
4906 event | APE_EVENT_STATUS_EVENT_PENDING);
4907
4908 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
4909
4910 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4911 break;
4912
4913 udelay(100);
4914 }
4915
4916 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
4917 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
4918 }
4919
4920 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
4921 {
4922 u32 event;
4923 u32 apedata;
4924
4925 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
4926 return;
4927
4928 switch (kind) {
4929 case RESET_KIND_INIT:
4930 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
4931 APE_HOST_SEG_SIG_MAGIC);
4932 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
4933 APE_HOST_SEG_LEN_MAGIC);
4934 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
4935 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
4936 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
4937 APE_HOST_DRIVER_ID_MAGIC);
4938 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
4939 APE_HOST_BEHAV_NO_PHYLOCK);
4940
4941 event = APE_EVENT_STATUS_STATE_START;
4942 break;
4943 case RESET_KIND_SHUTDOWN:
4944 event = APE_EVENT_STATUS_STATE_UNLOAD;
4945 break;
4946 case RESET_KIND_SUSPEND:
4947 event = APE_EVENT_STATUS_STATE_SUSPEND;
4948 break;
4949 default:
4950 return;
4951 }
4952
4953 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
4954
4955 tg3_ape_send_event(tp, event);
4956 }
4957
4958 /* tp->lock is held. */
4959 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4960 {
4961 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4962 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4963
4964 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4965 switch (kind) {
4966 case RESET_KIND_INIT:
4967 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4968 DRV_STATE_START);
4969 break;
4970
4971 case RESET_KIND_SHUTDOWN:
4972 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4973 DRV_STATE_UNLOAD);
4974 break;
4975
4976 case RESET_KIND_SUSPEND:
4977 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4978 DRV_STATE_SUSPEND);
4979 break;
4980
4981 default:
4982 break;
4983 }
4984 }
4985
4986 if (kind == RESET_KIND_INIT ||
4987 kind == RESET_KIND_SUSPEND)
4988 tg3_ape_driver_state_change(tp, kind);
4989 }
4990
4991 /* tp->lock is held. */
4992 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4993 {
4994 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4995 switch (kind) {
4996 case RESET_KIND_INIT:
4997 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4998 DRV_STATE_START_DONE);
4999 break;
5000
5001 case RESET_KIND_SHUTDOWN:
5002 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5003 DRV_STATE_UNLOAD_DONE);
5004 break;
5005
5006 default:
5007 break;
5008 }
5009 }
5010
5011 if (kind == RESET_KIND_SHUTDOWN)
5012 tg3_ape_driver_state_change(tp, kind);
5013 }
5014
5015 /* tp->lock is held. */
5016 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5017 {
5018 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5019 switch (kind) {
5020 case RESET_KIND_INIT:
5021 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5022 DRV_STATE_START);
5023 break;
5024
5025 case RESET_KIND_SHUTDOWN:
5026 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5027 DRV_STATE_UNLOAD);
5028 break;
5029
5030 case RESET_KIND_SUSPEND:
5031 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5032 DRV_STATE_SUSPEND);
5033 break;
5034
5035 default:
5036 break;
5037 }
5038 }
5039 }
5040
5041 static int tg3_poll_fw(struct tg3 *tp)
5042 {
5043 int i;
5044 u32 val;
5045
5046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5047 /* Wait up to 20ms for init done. */
5048 for (i = 0; i < 200; i++) {
5049 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5050 return 0;
5051 udelay(100);
5052 }
5053 return -ENODEV;
5054 }
5055
5056 /* Wait for firmware initialization to complete. */
5057 for (i = 0; i < 100000; i++) {
5058 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
5059 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5060 break;
5061 udelay(10);
5062 }
5063
5064 /* Chip might not be fitted with firmware. Some Sun onboard
5065 * parts are configured like that. So don't signal the timeout
5066 * of the above loop as an error, but do report the lack of
5067 * running firmware once.
5068 */
5069 if (i >= 100000 &&
5070 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5071 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5072
5073 printk(KERN_INFO PFX "%s: No firmware running.\n",
5074 tp->dev->name);
5075 }
5076
5077 return 0;
5078 }
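/* Worst-case budget of the waits above: 200 x 100us = 20ms for the
 * 5906 VCPU path, 100000 x 10us = 1s for the firmware mailbox
 * handshake on everything else.
 */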
5079
5080 /* Save PCI command register before chip reset */
5081 static void tg3_save_pci_state(struct tg3 *tp)
5082 {
5083 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5084 }
5085
5086 /* Restore PCI state after chip reset */
5087 static void tg3_restore_pci_state(struct tg3 *tp)
5088 {
5089 u32 val;
5090
5091 /* Re-enable indirect register accesses. */
5092 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5093 tp->misc_host_ctrl);
5094
5095 /* Set MAX PCI retry to zero. */
5096 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
5097 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5098 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5099 val |= PCISTATE_RETRY_SAME_DMA;
5100 /* Allow reads and writes to the APE register and memory space. */
5101 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5102 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5103 PCISTATE_ALLOW_APE_SHMEM_WR;
5104 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5105
5106 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
5107
5108 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5109 pcie_set_readrq(tp->pdev, 4096);
5110 else {
5111 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5112 tp->pci_cacheline_sz);
5113 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5114 tp->pci_lat_timer);
5115 }
5116
5117 /* Make sure PCI-X relaxed ordering bit is clear. */
5118 if (tp->pcix_cap) {
5119 u16 pcix_cmd;
5120
5121 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5122 &pcix_cmd);
5123 pcix_cmd &= ~PCI_X_CMD_ERO;
5124 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5125 pcix_cmd);
5126 }
5127
5128 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5129
5130 /* Chip reset on 5780 will reset MSI enable bit,
5131 * so we need to restore it.
5132 */
5133 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5134 u16 ctrl;
5135
5136 pci_read_config_word(tp->pdev,
5137 tp->msi_cap + PCI_MSI_FLAGS,
5138 &ctrl);
5139 pci_write_config_word(tp->pdev,
5140 tp->msi_cap + PCI_MSI_FLAGS,
5141 ctrl | PCI_MSI_FLAGS_ENABLE);
5142 val = tr32(MSGINT_MODE);
5143 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5144 }
5145 }
5146 }
5147
5148 static void tg3_stop_fw(struct tg3 *);
5149
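/* Rough shape of tg3_chip_reset(): take (and deliberately leak) the
 * nvram lock, save the PCI registers that the GRC core-clock reset
 * will clobber, fence off the irq handler via TG3_FLAG_CHIP_RESETTING,
 * issue the reset, wait out the window in which the chip ignores even
 * config cycles, then restore PCI state and re-probe the ASF
 * configuration from NVRAM.
 */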
5150 /* tp->lock is held. */
5151 static int tg3_chip_reset(struct tg3 *tp)
5152 {
5153 u32 val;
5154 void (*write_op)(struct tg3 *, u32, u32);
5155 int err;
5156
5157 tg3_nvram_lock(tp);
5158
5159 /* No matching tg3_nvram_unlock() after this because
5160 * chip reset below will undo the nvram lock.
5161 */
5162 tp->nvram_lock_cnt = 0;
5163
5164 /* GRC_MISC_CFG core clock reset will clear the memory
5165 * enable bit in PCI register 4 and the MSI enable bit
5166 * on some chips, so we save relevant registers here.
5167 */
5168 tg3_save_pci_state(tp);
5169
5170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5173 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5174 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
5175 tw32(GRC_FASTBOOT_PC, 0);
5176
5177 /*
5178 * We must avoid the readl() that normally takes place.
5179 * It locks up machines, causes machine checks, and other
5180 * fun things, so temporarily disable the 5701
5181 * hardware workaround while we do the reset.
5182 */
5183 write_op = tp->write32;
5184 if (write_op == tg3_write_flush_reg32)
5185 tp->write32 = tg3_write32;
5186
5187 /* Prevent the irq handler from reading or writing PCI registers
5188 * during chip reset when the memory enable bit in the PCI command
5189 * register may be cleared. The chip does not generate interrupt
5190 * at this time, but the irq handler may still be called due to irq
5191 * sharing or irqpoll.
5192 */
5193 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5194 if (tp->hw_status) {
5195 tp->hw_status->status = 0;
5196 tp->hw_status->status_tag = 0;
5197 }
5198 tp->last_tag = 0;
5199 smp_mb();
5200 synchronize_irq(tp->pdev->irq);
5201
5202 /* do the reset */
5203 val = GRC_MISC_CFG_CORECLK_RESET;
5204
5205 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5206 if (tr32(0x7e2c) == 0x60) {
5207 tw32(0x7e2c, 0x20);
5208 }
5209 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5210 tw32(GRC_MISC_CFG, (1 << 29));
5211 val |= (1 << 29);
5212 }
5213 }
5214
5215 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5216 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5217 tw32(GRC_VCPU_EXT_CTRL,
5218 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5219 }
5220
5221 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5222 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5223 tw32(GRC_MISC_CFG, val);
5224
5225 /* restore 5701 hardware bug workaround write method */
5226 tp->write32 = write_op;
5227
5228 /* Unfortunately, we have to delay before the PCI read back.
5229 * Some 575X chips will not even respond to a PCI cfg access
5230 * when the reset command is given to the chip.
5231 *
5232 * How do these hardware designers expect things to work
5233 * properly if the PCI write is posted for a long period
5234 * of time? It is always necessary to have some method by
5235 * which a register read back can occur to push out the write
5236 * that does the reset.
5237 *
5238 * For most tg3 variants the trick below was working.
5239 * Ho hum...
5240 */
5241 udelay(120);
5242
5243 /* Flush PCI posted writes. The normal MMIO registers
5244 * are inaccessible at this time so this is the only
5245 * way to do this reliably (actually, this is no longer
5246 * the case, see above). I tried to use indirect
5247 * register read/write but this upset some 5701 variants.
5248 */
5249 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
5250
5251 udelay(120);
5252
5253 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5254 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
5255 int i;
5256 u32 cfg_val;
5257
5258 /* Wait for link training to complete. */
5259 for (i = 0; i < 5000; i++)
5260 udelay(100);
5261
5262 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
5263 pci_write_config_dword(tp->pdev, 0xc4,
5264 cfg_val | (1 << 15));
5265 }
5266 /* Set PCIE max payload size and clear error status. */
5267 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
5268 }
5269
5270 tg3_restore_pci_state(tp);
5271
5272 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
5273
5274 val = 0;
5275 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5276 val = tr32(MEMARB_MODE);
5277 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
5278
5279 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
5280 tg3_stop_fw(tp);
5281 tw32(0x5000, 0x400);
5282 }
5283
5284 tw32(GRC_MODE, tp->grc_mode);
5285
5286 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
5287 val = tr32(0xc4);
5288
5289 tw32(0xc4, val | (1 << 15));
5290 }
5291
5292 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
5293 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5294 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
5295 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
5296 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
5297 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
5298 }
5299
5300 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5301 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
5302 tw32_f(MAC_MODE, tp->mac_mode);
5303 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
5304 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
5305 tw32_f(MAC_MODE, tp->mac_mode);
5306 } else
5307 tw32_f(MAC_MODE, 0);
5308 udelay(40);
5309
5310 err = tg3_poll_fw(tp);
5311 if (err)
5312 return err;
5313
5314 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
5315 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5316 val = tr32(0x7c00);
5317
5318 tw32(0x7c00, val | (1 << 25));
5319 }
5320
5321 /* Reprobe ASF enable state. */
5322 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
5323 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
5324 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
5325 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
5326 u32 nic_cfg;
5327
5328 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
5329 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
5330 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
5331 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
5332 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
5333 }
5334 }
5335
5336 return 0;
5337 }
5338
5339 /* tp->lock is held. */
5340 static void tg3_stop_fw(struct tg3 *tp)
5341 {
5342 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
5343 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
5344 u32 val;
5345 int i;
5346
5347 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5348 val = tr32(GRC_RX_CPU_EVENT);
5349 val |= (1 << 14);
5350 tw32(GRC_RX_CPU_EVENT, val);
5351
5352 /* Wait for RX cpu to ACK the event. */
5353 for (i = 0; i < 100; i++) {
5354 if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5355 break;
5356 udelay(1);
5357 }
5358 }
5359 }
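/* Bit 14 of GRC_RX_CPU_EVENT serves as the driver-to-firmware
 * doorbell above; the RX CPU clears it to acknowledge the pause
 * command, and the driver waits at most 100us (100 x 1us polls) for
 * that to happen.
 */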
5360
5361 /* tp->lock is held. */
5362 static int tg3_halt(struct tg3 *tp, int kind, int silent)
5363 {
5364 int err;
5365
5366 tg3_stop_fw(tp);
5367
5368 tg3_write_sig_pre_reset(tp, kind);
5369
5370 tg3_abort_hw(tp, silent);
5371 err = tg3_chip_reset(tp);
5372
5373 tg3_write_sig_legacy(tp, kind);
5374 tg3_write_sig_post_reset(tp, kind);
5375
5376 if (err)
5377 return err;
5378
5379 return 0;
5380 }
5381
5382 #define TG3_FW_RELEASE_MAJOR 0x0
5383 #define TG3_FW_RELEASE_MINOR 0x0
5384 #define TG3_FW_RELEASE_FIX 0x0
5385 #define TG3_FW_START_ADDR 0x08000000
5386 #define TG3_FW_TEXT_ADDR 0x08000000
5387 #define TG3_FW_TEXT_LEN 0x9c0
5388 #define TG3_FW_RODATA_ADDR 0x080009c0
5389 #define TG3_FW_RODATA_LEN 0x60
5390 #define TG3_FW_DATA_ADDR 0x08000a40
5391 #define TG3_FW_DATA_LEN 0x20
5392 #define TG3_FW_SBSS_ADDR 0x08000a60
5393 #define TG3_FW_SBSS_LEN 0xc
5394 #define TG3_FW_BSS_ADDR 0x08000a70
5395 #define TG3_FW_BSS_LEN 0x10
5396
5397 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
5398 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
5399 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
5400 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
5401 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
5402 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
5403 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
5404 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
5405 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
5406 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
5407 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
5408 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
5409 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
5410 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
5411 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
5412 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
5413 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
5414 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
5415 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
5416 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
5417 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
5418 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
5419 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
5420 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
5421 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5422 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5423 0, 0, 0, 0, 0, 0,
5424 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
5425 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5426 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5427 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5428 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
5429 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
5430 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
5431 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
5432 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5433 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
5434 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
5435 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5436 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5437 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
5438 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
5439 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
5440 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
5441 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
5442 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
5443 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
5444 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
5445 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
5446 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
5447 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
5448 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
5449 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
5450 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
5451 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
5452 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
5453 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
5454 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
5455 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
5456 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
5457 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
5458 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
5459 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
5460 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
5461 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
5462 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
5463 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
5464 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
5465 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
5466 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
5467 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
5468 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
5469 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
5470 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
5471 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
5472 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
5473 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
5474 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
5475 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
5476 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
5477 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
5478 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
5479 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
5480 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
5481 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
5482 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
5483 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
5484 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
5485 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
5486 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
5487 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
5488 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
5489 };
5490
5491 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
5492 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
5493 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
5494 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
5495 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
5496 0x00000000
5497 };
5498
5499 #if 0 /* All zeros, don't eat up space with it. */
5500 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
5501 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5502 0x00000000, 0x00000000, 0x00000000, 0x00000000
5503 };
5504 #endif
5505
5506 #define RX_CPU_SCRATCH_BASE 0x30000
5507 #define RX_CPU_SCRATCH_SIZE 0x04000
5508 #define TX_CPU_SCRATCH_BASE 0x34000
5509 #define TX_CPU_SCRATCH_SIZE 0x04000
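/* 0x04000 bytes: each embedded CPU gets a 16KB scratch window into
 * which its firmware image is loaded.
 */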
5510
5511 /* tp->lock is held. */
5512 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5513 {
5514 int i;
5515
5516 BUG_ON(offset == TX_CPU_BASE &&
5517 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5518
5519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5520 u32 val = tr32(GRC_VCPU_EXT_CTRL);
5521
5522 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5523 return 0;
5524 }
5525 if (offset == RX_CPU_BASE) {
5526 for (i = 0; i < 10000; i++) {
5527 tw32(offset + CPU_STATE, 0xffffffff);
5528 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5529 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5530 break;
5531 }
5532
5533 tw32(offset + CPU_STATE, 0xffffffff);
5534 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
5535 udelay(10);
5536 } else {
5537 for (i = 0; i < 10000; i++) {
5538 tw32(offset + CPU_STATE, 0xffffffff);
5539 tw32(offset + CPU_MODE, CPU_MODE_HALT);
5540 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5541 break;
5542 }
5543 }
5544
5545 if (i >= 10000) {
5546 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
5547 "%s CPU\n",
5548 tp->dev->name,
5549 (offset == RX_CPU_BASE ? "RX" : "TX"));
5550 return -ENODEV;
5551 }
5552
5553 /* Clear firmware's nvram arbitration. */
5554 if (tp->tg3_flags & TG3_FLAG_NVRAM)
5555 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5556 return 0;
5557 }
5558
5559 struct fw_info {
5560 unsigned int text_base;
5561 unsigned int text_len;
5562 const u32 *text_data;
5563 unsigned int rodata_base;
5564 unsigned int rodata_len;
5565 const u32 *rodata_data;
5566 unsigned int data_base;
5567 unsigned int data_len;
5568 const u32 *data_data;
5569 };
5570
5571 /* tp->lock is held. */
5572 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5573 int cpu_scratch_size, struct fw_info *info)
5574 {
5575 int err, lock_err, i;
5576 void (*write_op)(struct tg3 *, u32, u32);
5577
5578 if (cpu_base == TX_CPU_BASE &&
5579 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5580 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5581 "TX cpu firmware on %s which is 5705.\n",
5582 tp->dev->name);
5583 return -EINVAL;
5584 }
5585
5586 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5587 write_op = tg3_write_mem;
5588 else
5589 write_op = tg3_write_indirect_reg32;
5590
5591 /* It is possible that bootcode is still loading at this point.
5592 * Grab the nvram lock before halting the cpu.
5593 */
5594 lock_err = tg3_nvram_lock(tp);
5595 err = tg3_halt_cpu(tp, cpu_base);
5596 if (!lock_err)
5597 tg3_nvram_unlock(tp);
5598 if (err)
5599 goto out;
5600
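/* The firmware images are linked to run at 0x08000000; masking each
 * section base with 0xffff converts that link address into an offset
 * within the CPU's scratch window.  Sections whose data pointer is
 * NULL (e.g. an all-zero .data) are simply zero-filled.
 */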
5601 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5602 write_op(tp, cpu_scratch_base + i, 0);
5603 tw32(cpu_base + CPU_STATE, 0xffffffff);
5604 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5605 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5606 write_op(tp, (cpu_scratch_base +
5607 (info->text_base & 0xffff) +
5608 (i * sizeof(u32))),
5609 (info->text_data ?
5610 info->text_data[i] : 0));
5611 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5612 write_op(tp, (cpu_scratch_base +
5613 (info->rodata_base & 0xffff) +
5614 (i * sizeof(u32))),
5615 (info->rodata_data ?
5616 info->rodata_data[i] : 0));
5617 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5618 write_op(tp, (cpu_scratch_base +
5619 (info->data_base & 0xffff) +
5620 (i * sizeof(u32))),
5621 (info->data_data ?
5622 info->data_data[i] : 0));
5623
5624 err = 0;
5625
5626 out:
5627 return err;
5628 }
5629
5630 /* tp->lock is held. */
5631 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5632 {
5633 struct fw_info info;
5634 int err, i;
5635
5636 info.text_base = TG3_FW_TEXT_ADDR;
5637 info.text_len = TG3_FW_TEXT_LEN;
5638 info.text_data = &tg3FwText[0];
5639 info.rodata_base = TG3_FW_RODATA_ADDR;
5640 info.rodata_len = TG3_FW_RODATA_LEN;
5641 info.rodata_data = &tg3FwRodata[0];
5642 info.data_base = TG3_FW_DATA_ADDR;
5643 info.data_len = TG3_FW_DATA_LEN;
5644 info.data_data = NULL;
5645
5646 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5647 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5648 &info);
5649 if (err)
5650 return err;
5651
5652 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5653 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5654 &info);
5655 if (err)
5656 return err;
5657
5658 /* Now startup only the RX cpu. */
5659 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5660 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5661
5662 for (i = 0; i < 5; i++) {
5663 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5664 break;
5665 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5666 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
5667 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
5668 udelay(1000);
5669 }
5670 if (i >= 5) {
5671 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5672 "to set RX CPU PC, is %08x should be %08x\n",
5673 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5674 TG3_FW_TEXT_ADDR);
5675 return -ENODEV;
5676 }
5677 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5678 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
5679
5680 return 0;
5681 }
5682
5683
5684 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
5685 #define TG3_TSO_FW_RELEASE_MINOR 0x6
5686 #define TG3_TSO_FW_RELEASE_FIX 0x0
5687 #define TG3_TSO_FW_START_ADDR 0x08000000
5688 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
5689 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
5690 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
5691 #define TG3_TSO_FW_RODATA_LEN 0x60
5692 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
5693 #define TG3_TSO_FW_DATA_LEN 0x30
5694 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
5695 #define TG3_TSO_FW_SBSS_LEN 0x2c
5696 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
5697 #define TG3_TSO_FW_BSS_LEN 0x894
5698
5699 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5700 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5701 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5702 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5703 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5704 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5705 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5706 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5707 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5708 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5709 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5710 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5711 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5712 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5713 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5714 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5715 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5716 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5717 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5718 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5719 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5720 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5721 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5722 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5723 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5724 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5725 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5726 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5727 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5728 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5729 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5730 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5731 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5732 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5733 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5734 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5735 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5736 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5737 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5738 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5739 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5740 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5741 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5742 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5743 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5744 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5745 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5746 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5747 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5748 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5749 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5750 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5751 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5752 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5753 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5754 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5755 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5756 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5757 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5758 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5759 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5760 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5761 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5762 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5763 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5764 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5765 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5766 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5767 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5768 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5769 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5770 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5771 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5772 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5773 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5774 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5775 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5776 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5777 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5778 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5779 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5780 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5781 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5782 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5783 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5784 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5785 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5786 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5787 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5788 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5789 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5790 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5791 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5792 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5793 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5794 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5795 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5796 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5797 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5798 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5799 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5800 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5801 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5802 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5803 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5804 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5805 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5806 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5807 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5808 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5809 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5810 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5811 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5812 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5813 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5814 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5815 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5816 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5817 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5818 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5819 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5820 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5821 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5822 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5823 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5824 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5825 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5826 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5827 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5828 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5829 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5830 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5831 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5832 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5833 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5834 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5835 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5836 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5837 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5838 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5839 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5840 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5841 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5842 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5843 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5844 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5845 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5846 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5847 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5848 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5849 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5850 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5851 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5852 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5853 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5854 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5855 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5856 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5857 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5858 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5859 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5860 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5861 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5862 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5863 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5864 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5865 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5866 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5867 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5868 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5869 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5870 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5871 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5872 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5873 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5874 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5875 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5876 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5877 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5878 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5879 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5880 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5881 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5882 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5883 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5884 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5885 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5886 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5887 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5888 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5889 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5890 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5891 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5892 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5893 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5894 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5895 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5896 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5897 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5898 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5899 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5900 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5901 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5902 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5903 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5904 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5905 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5906 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5907 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5908 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5909 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5910 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5911 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5912 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5913 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5914 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5915 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5916 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5917 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5918 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5919 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5920 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5921 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5922 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5923 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5924 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5925 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5926 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5927 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5928 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5929 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5930 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5931 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5932 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5933 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5934 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5935 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5936 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5937 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5938 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5939 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5940 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5941 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5942 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5943 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5944 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5945 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5946 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5947 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5948 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5949 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5950 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5951 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5952 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5953 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5954 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5955 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5956 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5957 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5958 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5959 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5960 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5961 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5962 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5963 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5964 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5965 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5966 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5967 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5968 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5969 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5970 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5971 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5972 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5973 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5974 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5975 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5976 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5977 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5978 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5979 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5980 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5981 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5982 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5983 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5984 };
5985
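/* The rodata section is largely ASCII tags used by the firmware's
 * internal logging: "MainCpuB", "MainCpuA", "SwEvent0", "fatalErr".
 */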
5986 static const u32 tg3TsoFwRodata[] = {
5987 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
5988 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
5989 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
5990 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
5991 0x00000000,
5992 };
5993
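/* The data section embeds the firmware version string,
 * "stkoffld_v1.6.0", matching the release defines above.
 */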
5994 static const u32 tg3TsoFwData[] = {
5995 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
5996 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
5997 0x00000000,
5998 };
5999
6000 /* 5705 needs a special version of the TSO firmware. */
6001 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6002 #define TG3_TSO5_FW_RELEASE_MINOR 0x2
6003 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6004 #define TG3_TSO5_FW_START_ADDR 0x00010000
6005 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6006 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6007 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6008 #define TG3_TSO5_FW_RODATA_LEN 0x50
6009 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6010 #define TG3_TSO5_FW_DATA_LEN 0x20
6011 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6012 #define TG3_TSO5_FW_SBSS_LEN 0x28
6013 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6014 #define TG3_TSO5_FW_BSS_LEN 0x88
6015
6016 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6017 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6018 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6019 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6020 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6021 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6022 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6023 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6024 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6025 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6026 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6027 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6028 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6029 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6030 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6031 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6032 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6033 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6034 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6035 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6036 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6037 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6038 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6039 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6040 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6041 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6042 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6043 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6044 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6045 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6046 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6047 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6048 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6049 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6050 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6051 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6052 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6053 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6054 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6055 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6056 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6057 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6058 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6059 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6060 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6061 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6062 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6063 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6064 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6065 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6066 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6067 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6068 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6069 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6070 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6071 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6072 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6073 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6074 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6075 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6076 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6077 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6078 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6079 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6080 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6081 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6082 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6083 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6084 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6085 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6086 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6087 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6088 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6089 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6090 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6091 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6092 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6093 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6094 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6095 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6096 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6097 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6098 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6099 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6100 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6101 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6102 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6103 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6104 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6105 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6106 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6107 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6108 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6109 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6110 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6111 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6112 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6113 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6114 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6115 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6116 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6117 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6118 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6119 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6120 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6121 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6122 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6123 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6124 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6125 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6126 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6127 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6128 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6129 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6130 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6131 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6132 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6133 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6134 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6135 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6136 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6137 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6138 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6139 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6140 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6141 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6142 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6143 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6144 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6145 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6146 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6147 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6148 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6149 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6150 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6151 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6152 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6153 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6154 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6155 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6156 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6157 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6158 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6159 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6160 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6161 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6162 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6163 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6164 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6165 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6166 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6167 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6168 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6169 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6170 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6171 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6172 0x00000000, 0x00000000, 0x00000000,
6173 };
6174
6175 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6176 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6177 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6178 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6179 0x00000000, 0x00000000, 0x00000000,
6180 };
6181
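/* As above, the data section carries the version string for this
 * build: "stkoffld_v1.2.0".
 */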
6182 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6183 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6184 0x00000000, 0x00000000, 0x00000000,
6185 };
6186
6187 /* tp->lock is held. */
6188 static int tg3_load_tso_firmware(struct tg3 *tp)
6189 {
6190 struct fw_info info;
6191 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
6192 int err, i;
6193
6194 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6195 return 0;
6196
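/* The 5705 has no TX CPU, so its TSO firmware runs on the RX CPU out
 * of a region carved from the MBUF pool SRAM; the scratch size must
 * cover every section, including SBSS and BSS. All other chips load
 * the firmware into the TX CPU scratch area.
 */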
6197 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6198 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6199 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6200 info.text_data = &tg3Tso5FwText[0];
6201 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6202 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6203 info.rodata_data = &tg3Tso5FwRodata[0];
6204 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6205 info.data_len = TG3_TSO5_FW_DATA_LEN;
6206 info.data_data = &tg3Tso5FwData[0];
6207 cpu_base = RX_CPU_BASE;
6208 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6209 cpu_scratch_size = (info.text_len +
6210 info.rodata_len +
6211 info.data_len +
6212 TG3_TSO5_FW_SBSS_LEN +
6213 TG3_TSO5_FW_BSS_LEN);
6214 } else {
6215 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6216 info.text_len = TG3_TSO_FW_TEXT_LEN;
6217 info.text_data = &tg3TsoFwText[0];
6218 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6219 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6220 info.rodata_data = &tg3TsoFwRodata[0];
6221 info.data_base = TG3_TSO_FW_DATA_ADDR;
6222 info.data_len = TG3_TSO_FW_DATA_LEN;
6223 info.data_data = &tg3TsoFwData[0];
6224 cpu_base = TX_CPU_BASE;
6225 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6226 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
6227 }
6228
6229 err = tg3_load_firmware_cpu(tp, cpu_base,
6230 cpu_scratch_base, cpu_scratch_size,
6231 &info);
6232 if (err)
6233 return err;
6234
6235 /* Now start up the cpu. */
6236 tw32(cpu_base + CPU_STATE, 0xffffffff);
6237 tw32_f(cpu_base + CPU_PC, info.text_base);
6238
6239 for (i = 0; i < 5; i++) {
6240 if (tr32(cpu_base + CPU_PC) == info.text_base)
6241 break;
6242 tw32(cpu_base + CPU_STATE, 0xffffffff);
6243 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
6244 tw32_f(cpu_base + CPU_PC, info.text_base);
6245 udelay(1000);
6246 }
6247 if (i >= 5) {
6248 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
6249 "to set CPU PC, is %08x should be %08x\n",
6250 tp->dev->name, tr32(cpu_base + CPU_PC),
6251 info.text_base);
6252 return -ENODEV;
6253 }
6254 tw32(cpu_base + CPU_STATE, 0xffffffff);
6255 tw32_f(cpu_base + CPU_MODE, 0x00000000);
6256 return 0;
6257 }
6258
6259
6260 /* tp->lock is held. */
6261 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
6262 {
6263 u32 addr_high, addr_low;
6264 int i;
6265
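/* The address is programmed as two registers: bytes 0-1 in the high
 * word, bytes 2-5 in the low word. E.g. 00:10:18:aa:bb:cc loads as
 * addr_high = 0x0010, addr_low = 0x18aabbcc. The same value is
 * written to all four address slots (sixteen on 5703/5704).
 */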
6266 addr_high = ((tp->dev->dev_addr[0] << 8) |
6267 tp->dev->dev_addr[1]);
6268 addr_low = ((tp->dev->dev_addr[2] << 24) |
6269 (tp->dev->dev_addr[3] << 16) |
6270 (tp->dev->dev_addr[4] << 8) |
6271 (tp->dev->dev_addr[5] << 0));
6272 for (i = 0; i < 4; i++) {
6273 if (i == 1 && skip_mac_1)
6274 continue;
6275 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
6276 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
6277 }
6278
6279 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
6280 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6281 for (i = 0; i < 12; i++) {
6282 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
6283 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
6284 }
6285 }
6286
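/* Seed the transmit backoff generator from the byte sum of the MAC
 * address, so stations sharing a half-duplex segment are unlikely to
 * pick the same backoff slots.
 */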
6287 addr_high = (tp->dev->dev_addr[0] +
6288 tp->dev->dev_addr[1] +
6289 tp->dev->dev_addr[2] +
6290 tp->dev->dev_addr[3] +
6291 tp->dev->dev_addr[4] +
6292 tp->dev->dev_addr[5]) &
6293 TX_BACKOFF_SEED_MASK;
6294 tw32(MAC_TX_BACKOFF_SEED, addr_high);
6295 }
6296
6297 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6298 {
6299 struct tg3 *tp = netdev_priv(dev);
6300 struct sockaddr *addr = p;
6301 int err = 0, skip_mac_1 = 0;
6302
6303 if (!is_valid_ether_addr(addr->sa_data))
6304 return -EINVAL;
6305
6306 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6307
6308 if (!netif_running(dev))
6309 return 0;
6310
6311 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6312 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6313
6314 addr0_high = tr32(MAC_ADDR_0_HIGH);
6315 addr0_low = tr32(MAC_ADDR_0_LOW);
6316 addr1_high = tr32(MAC_ADDR_1_HIGH);
6317 addr1_low = tr32(MAC_ADDR_1_LOW);
6318
6319 /* Skip MAC addr 1 if ASF is using it. */
6320 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6321 !(addr1_high == 0 && addr1_low == 0))
6322 skip_mac_1 = 1;
6323 }
6324 spin_lock_bh(&tp->lock);
6325 __tg3_set_mac_addr(tp, skip_mac_1);
6326 spin_unlock_bh(&tp->lock);
6327
6328 return err;
6329 }
6330
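/* A BDINFO block lives in NIC SRAM, so it is written with
 * tg3_write_mem(): the 64-bit host DMA address of the ring, a packed
 * maxlen/flags word, and, on pre-5705 chips only, the ring's
 * NIC-local address.
 */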
6331 /* tp->lock is held. */
6332 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6333 dma_addr_t mapping, u32 maxlen_flags,
6334 u32 nic_addr)
6335 {
6336 tg3_write_mem(tp,
6337 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6338 ((u64) mapping >> 32));
6339 tg3_write_mem(tp,
6340 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6341 ((u64) mapping & 0xffffffff));
6342 tg3_write_mem(tp,
6343 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6344 maxlen_flags);
6345
6346 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6347 tg3_write_mem(tp,
6348 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6349 nic_addr);
6350 }
6351
6352 static void __tg3_set_rx_mode(struct net_device *);
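/* Program the host coalescing engine. The *COL_TICKS registers are in
 * usecs and the *MAX_FRAMES registers count packets; an interrupt is
 * generated when either threshold is reached. 5705+ chips lack the
 * during-interrupt tick registers and the statistics block timer.
 */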
6353 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6354 {
6355 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6356 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6357 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6358 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6359 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6360 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6361 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6362 }
6363 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6364 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6365 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6366 u32 val = ec->stats_block_coalesce_usecs;
6367
6368 if (!netif_carrier_ok(tp->dev))
6369 val = 0;
6370
6371 tw32(HOSTCC_STAT_COAL_TICKS, val);
6372 }
6373 }
6374
6375 /* tp->lock is held. */
6376 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6377 {
6378 u32 val, rdmac_mode;
6379 int i, err, limit;
6380
6381 tg3_disable_ints(tp);
6382
6383 tg3_stop_fw(tp);
6384
6385 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6386
6387 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6388 tg3_abort_hw(tp, 1);
6389 }
6390
6391 if (reset_phy)
6392 tg3_phy_reset(tp);
6393
6394 err = tg3_chip_reset(tp);
6395 if (err)
6396 return err;
6397
6398 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6399
6400 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
6401 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
6402 val = tr32(TG3_CPMU_CTRL);
6403 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6404 tw32(TG3_CPMU_CTRL, val);
6405
6406 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6407 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6408 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6409 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6410
6411 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6412 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6413 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6414 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6415
6416 val = tr32(TG3_CPMU_HST_ACC);
6417 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6418 val |= CPMU_HST_ACC_MACCLK_6_25;
6419 tw32(TG3_CPMU_HST_ACC, val);
6420 }
6421
6422 /* This works around an issue with Athlon chipsets on
6423 * B3 tigon3 silicon. This bit has no effect on any
6424 * other revision. But do not set this on PCI Express
6425 * chips and don't even touch the clocks if the CPMU is present.
6426 */
6427 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6428 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6429 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6430 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6431 }
6432
6433 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6434 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6435 val = tr32(TG3PCI_PCISTATE);
6436 val |= PCISTATE_RETRY_SAME_DMA;
6437 tw32(TG3PCI_PCISTATE, val);
6438 }
6439
6440 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6441 /* Allow reads and writes to the
6442 * APE register and memory space.
6443 */
6444 val = tr32(TG3PCI_PCISTATE);
6445 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6446 PCISTATE_ALLOW_APE_SHMEM_WR;
6447 tw32(TG3PCI_PCISTATE, val);
6448 }
6449
6450 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6451 /* Enable some hw fixes. */
6452 val = tr32(TG3PCI_MSI_DATA);
6453 val |= (1 << 26) | (1 << 28) | (1 << 29);
6454 tw32(TG3PCI_MSI_DATA, val);
6455 }
6456
6457 /* Descriptor ring initialization may access the NIC SRAM
6458 * area in order to set up the TX descriptors, so we can
6459 * only do this after the hardware has been successfully
6460 * reset.
6461 */
6462 err = tg3_init_rings(tp);
6463 if (err)
6464 return err;
6465
6466 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6467 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6468 /* This value is determined during the probe time DMA
6469 * engine test, tg3_test_dma.
6470 */
6471 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6472 }
6473
6474 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6475 GRC_MODE_4X_NIC_SEND_RINGS |
6476 GRC_MODE_NO_TX_PHDR_CSUM |
6477 GRC_MODE_NO_RX_PHDR_CSUM);
6478 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6479
6480 /* Pseudo-header checksum is done by hardware logic and not
6481 * the offload processors, so make the chip do the pseudo-
6482 * header checksums on receive. For transmit it is more
6483 * convenient to do the pseudo-header checksum in software
6484 * as Linux does that on transmit for us in all cases.
6485 */
6486 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6487
6488 tw32(GRC_MODE,
6489 tp->grc_mode |
6490 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6491
6492 /* Set up the timer prescaler. The clock is always 66MHz; a prescaler of 65 divides it by 66 for a 1 usec tick. */
6493 val = tr32(GRC_MISC_CFG);
6494 val &= ~0xff;
6495 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6496 tw32(GRC_MISC_CFG, val);
6497
6498 /* Initialize MBUF/DESC pool. */
6499 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6500 /* Do nothing. */
6501 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6502 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6503 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6504 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6505 else
6506 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6507 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6508 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6509 }
6510 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6511 int fw_len;
6512
6513 fw_len = (TG3_TSO5_FW_TEXT_LEN +
6514 TG3_TSO5_FW_RODATA_LEN +
6515 TG3_TSO5_FW_DATA_LEN +
6516 TG3_TSO5_FW_SBSS_LEN +
6517 TG3_TSO5_FW_BSS_LEN);
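/* Round the firmware footprint up to a 128-byte boundary, e.g.
 * 0xe90 + 0x50 + 0x20 + 0x28 + 0x88 = 0xfb0 rounds up to 0x1000.
 */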
6518 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6519 tw32(BUFMGR_MB_POOL_ADDR,
6520 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6521 tw32(BUFMGR_MB_POOL_SIZE,
6522 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6523 }
6524
6525 if (tp->dev->mtu <= ETH_DATA_LEN) {
6526 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6527 tp->bufmgr_config.mbuf_read_dma_low_water);
6528 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6529 tp->bufmgr_config.mbuf_mac_rx_low_water);
6530 tw32(BUFMGR_MB_HIGH_WATER,
6531 tp->bufmgr_config.mbuf_high_water);
6532 } else {
6533 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6534 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6535 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6536 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6537 tw32(BUFMGR_MB_HIGH_WATER,
6538 tp->bufmgr_config.mbuf_high_water_jumbo);
6539 }
6540 tw32(BUFMGR_DMA_LOW_WATER,
6541 tp->bufmgr_config.dma_low_water);
6542 tw32(BUFMGR_DMA_HIGH_WATER,
6543 tp->bufmgr_config.dma_high_water);
6544
6545 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6546 for (i = 0; i < 2000; i++) {
6547 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6548 break;
6549 udelay(10);
6550 }
6551 if (i >= 2000) {
6552 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6553 tp->dev->name);
6554 return -ENODEV;
6555 }
6556
6557 /* Setup replenish threshold. */
6558 val = tp->rx_pending / 8;
6559 if (val == 0)
6560 val = 1;
6561 else if (val > tp->rx_std_max_post)
6562 val = tp->rx_std_max_post;
6563 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6564 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6565 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6566
6567 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6568 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6569 }
6570
6571 tw32(RCVBDI_STD_THRESH, val);
6572
6573 /* Initialize TG3_BDINFO's at:
6574 * RCVDBDI_STD_BD: standard eth size rx ring
6575 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
6576 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
6577 *
6578 * like so:
6579 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
6580 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
6581 * ring attribute flags
6582 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
6583 *
6584 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6585 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6586 *
6587 * The size of each ring is fixed in the firmware, but the location is
6588 * configurable.
6589 */
6590 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6591 ((u64) tp->rx_std_mapping >> 32));
6592 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6593 ((u64) tp->rx_std_mapping & 0xffffffff));
6594 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6595 NIC_SRAM_RX_BUFFER_DESC);
6596
6597 /* Don't even try to program the JUMBO/MINI buffer descriptor
6598 * configs on 5705.
6599 */
6600 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6601 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6602 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6603 } else {
6604 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6605 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6606
6607 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6608 BDINFO_FLAGS_DISABLED);
6609
6610 /* Setup replenish threshold. */
6611 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6612
6613 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6614 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6615 ((u64) tp->rx_jumbo_mapping >> 32));
6616 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6617 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6618 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6619 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6620 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6621 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6622 } else {
6623 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6624 BDINFO_FLAGS_DISABLED);
6625 }
6626
6627 }
6628
6629 /* There is only one send ring on 5705/5750, so there is no need
6630 * to explicitly disable the others.
6631 */
6632 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6633 /* Clear out send RCB ring in SRAM. */
6634 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6635 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6636 BDINFO_FLAGS_DISABLED);
6637 }
6638
6639 tp->tx_prod = 0;
6640 tp->tx_cons = 0;
6641 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6642 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6643
6644 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6645 tp->tx_desc_mapping,
6646 (TG3_TX_RING_SIZE <<
6647 BDINFO_FLAGS_MAXLEN_SHIFT),
6648 NIC_SRAM_TX_BUFFER_DESC);
6649
6650 /* There is only one receive return ring on 5705/5750, so there
6651 * is no need to explicitly disable the others.
6652 */
6653 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6654 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6655 i += TG3_BDINFO_SIZE) {
6656 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6657 BDINFO_FLAGS_DISABLED);
6658 }
6659 }
6660
6661 tp->rx_rcb_ptr = 0;
6662 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6663
6664 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6665 tp->rx_rcb_mapping,
6666 (TG3_RX_RCB_RING_SIZE(tp) <<
6667 BDINFO_FLAGS_MAXLEN_SHIFT),
6668 0);
6669
6670 tp->rx_std_ptr = tp->rx_pending;
6671 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6672 tp->rx_std_ptr);
6673
6674 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6675 tp->rx_jumbo_pending : 0;
6676 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6677 tp->rx_jumbo_ptr);
6678
6679 /* Initialize MAC address and backoff seed. */
6680 __tg3_set_mac_addr(tp, 0);
6681
6682 /* MTU + ethernet header + FCS + optional VLAN tag; the extra 8 bytes beyond ETH_HLEN cover the FCS and VLAN tag. */
6683 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6684
6685 /* The slot time is changed by tg3_setup_phy if we
6686 * run at gigabit with half duplex.
6687 */
6688 tw32(MAC_TX_LENGTHS,
6689 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6690 (6 << TX_LENGTHS_IPG_SHIFT) |
6691 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6692
6693 /* Receive rules. */
6694 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6695 tw32(RCVLPC_CONFIG, 0x0181);
6696
6697 /* Calculate the RDMAC_MODE setting early; we need it to determine
6698 * the RCVLPC_STATS_ENABLE mask.
6699 */
6700 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6701 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6702 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6703 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6704 RDMAC_MODE_LNGREAD_ENAB);
6705
6706 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
6707 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
6708 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
6709 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
6710
6711 /* This if statement applies to 5705 and 5750 PCI devices only */
6712 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6713 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6714 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6715 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6716 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6717 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6718 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6719 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6720 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6721 }
6722 }
6723
6724 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6725 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6726
6727 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6728 rdmac_mode |= (1 << 27);
6729
6730 /* Receive/send statistics. */
6731 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6732 val = tr32(RCVLPC_STATS_ENABLE);
6733 val &= ~RCVLPC_STATSENAB_DACK_FIX;
6734 tw32(RCVLPC_STATS_ENABLE, val);
6735 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6736 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6737 val = tr32(RCVLPC_STATS_ENABLE);
6738 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6739 tw32(RCVLPC_STATS_ENABLE, val);
6740 } else {
6741 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6742 }
6743 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6744 tw32(SNDDATAI_STATSENAB, 0xffffff);
6745 tw32(SNDDATAI_STATSCTRL,
6746 (SNDDATAI_SCTRL_ENABLE |
6747 SNDDATAI_SCTRL_FASTUPD));
6748
6749 /* Setup host coalescing engine. */
6750 tw32(HOSTCC_MODE, 0);
6751 for (i = 0; i < 2000; i++) {
6752 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6753 break;
6754 udelay(10);
6755 }
6756
6757 __tg3_set_coalesce(tp, &tp->coal);
6758
6759 /* set status block DMA address */
6760 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6761 ((u64) tp->status_mapping >> 32));
6762 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6763 ((u64) tp->status_mapping & 0xffffffff));
6764
6765 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6766 /* Status/statistics block address. See tg3_timer,
6767 * the tg3_periodic_fetch_stats call there, and
6768 * tg3_get_stats to see how this works for 5705/5750 chips.
6769 */
6770 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6771 ((u64) tp->stats_mapping >> 32));
6772 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6773 ((u64) tp->stats_mapping & 0xffffffff));
6774 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6775 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6776 }
6777
6778 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6779
6780 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6781 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6782 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6783 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6784
6785 /* Clear statistics/status block in chip, and status block in ram. */
6786 for (i = NIC_SRAM_STATS_BLK;
6787 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6788 i += sizeof(u32)) {
6789 tg3_write_mem(tp, i, 0);
6790 udelay(40);
6791 }
6792 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6793
6794 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6795 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6796 /* reset to prevent losing 1st rx packet intermittently */
6797 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6798 udelay(10);
6799 }
6800
6801 tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6802 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6803 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6804 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6805 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
6806 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6807 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6808 udelay(40);
6809
6810 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6811 * If TG3_FLG2_IS_NIC is zero, we should read the
6812 * register to preserve the GPIO settings for LOMs. The GPIOs,
6813 * whether used as inputs or outputs, are set by boot code after
6814 * reset.
6815 */
6816 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6817 u32 gpio_mask;
6818
6819 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6820 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6821 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6822
6823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6824 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6825 GRC_LCLCTRL_GPIO_OUTPUT3;
6826
6827 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6828 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6829
6830 tp->grc_local_ctrl &= ~gpio_mask;
6831 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6832
6833 /* GPIO1 must be driven high for eeprom write protect */
6834 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6835 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6836 GRC_LCLCTRL_GPIO_OUTPUT1);
6837 }
6838 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6839 udelay(100);
6840
6841 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6842 tp->last_tag = 0;
6843
6844 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6845 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6846 udelay(40);
6847 }
6848
6849 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6850 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6851 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6852 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6853 WDMAC_MODE_LNGREAD_ENAB);
6854
6855 /* If statement applies to 5705 and 5750 PCI devices only */
6856 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6857 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6858 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6859 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
6860 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6861 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6862 /* nothing */
6863 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6864 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6865 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6866 val |= WDMAC_MODE_RX_ACCEL;
6867 }
6868 }
6869
6870 /* Enable host coalescing bug fix */
6871 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6872 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
6873 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
6874 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
6875 val |= (1 << 29);
6876
6877 tw32_f(WDMAC_MODE, val);
6878 udelay(40);
6879
6880 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6881 u16 pcix_cmd;
6882
6883 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6884 &pcix_cmd);
6885 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6886 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
6887 pcix_cmd |= PCI_X_CMD_READ_2K;
6888 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6889 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
6890 pcix_cmd |= PCI_X_CMD_READ_2K;
6891 }
6892 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6893 pcix_cmd);
6894 }
6895
6896 tw32_f(RDMAC_MODE, rdmac_mode);
6897 udelay(40);
6898
6899 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6900 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6901 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6902
6903 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6904 tw32(SNDDATAC_MODE,
6905 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
6906 else
6907 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6908
6909 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6910 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6911 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6912 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6913 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6914 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6915 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6916 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6917
6918 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6919 err = tg3_load_5701_a0_firmware_fix(tp);
6920 if (err)
6921 return err;
6922 }
6923
6924 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6925 err = tg3_load_tso_firmware(tp);
6926 if (err)
6927 return err;
6928 }
6929
6930 tp->tx_mode = TX_MODE_ENABLE;
6931 tw32_f(MAC_TX_MODE, tp->tx_mode);
6932 udelay(100);
6933
6934 tp->rx_mode = RX_MODE_ENABLE;
6935 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
6936 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
6937 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6938
6939 tw32_f(MAC_RX_MODE, tp->rx_mode);
6940 udelay(10);
6941
6942 if (tp->link_config.phy_is_low_power) {
6943 tp->link_config.phy_is_low_power = 0;
6944 tp->link_config.speed = tp->link_config.orig_speed;
6945 tp->link_config.duplex = tp->link_config.orig_duplex;
6946 tp->link_config.autoneg = tp->link_config.orig_autoneg;
6947 }
6948
6949 tp->mi_mode = MAC_MI_MODE_BASE;
6950 tw32_f(MAC_MI_MODE, tp->mi_mode);
6951 udelay(80);
6952
6953 tw32(MAC_LED_CTRL, tp->led_ctrl);
6954
6955 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6956 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6957 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6958 udelay(10);
6959 }
6960 tw32_f(MAC_RX_MODE, tp->rx_mode);
6961 udelay(10);
6962
6963 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6964 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6965 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6966 /* Set drive transmission level to 1.2V */
6967 /* only if the signal pre-emphasis bit is not set */
6968 val = tr32(MAC_SERDES_CFG);
6969 val &= 0xfffff000;
6970 val |= 0x880;
6971 tw32(MAC_SERDES_CFG, val);
6972 }
6973 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6974 tw32(MAC_SERDES_CFG, 0x616000);
6975 }
6976
6977 /* Prevent chip from dropping frames when flow control
6978 * is enabled.
6979 */
6980 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6981
6982 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6983 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6984 /* Use hardware link auto-negotiation */
6985 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6986 }
6987
6988 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6989 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6990 u32 tmp;
6991
6992 tmp = tr32(SERDES_RX_CTRL);
6993 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6994 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6995 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6996 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6997 }
6998
6999 err = tg3_setup_phy(tp, 0);
7000 if (err)
7001 return err;
7002
7003 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7004 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7005 u32 tmp;
7006
7007 /* Clear CRC stats. */
7008 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7009 tg3_writephy(tp, MII_TG3_TEST1,
7010 tmp | MII_TG3_TEST1_CRC_EN);
7011 tg3_readphy(tp, 0x14, &tmp);
7012 }
7013 }
7014
7015 __tg3_set_rx_mode(tp->dev);
7016
7017 /* Initialize receive rules. */
7018 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7019 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7020 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7021 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7022
7023 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7024 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7025 limit = 8;
7026 else
7027 limit = 16;
7028 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7029 limit -= 4;
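/* The switch below falls through deliberately: starting at 'limit',
 * each case clears one rule/value pair, so rules limit-1 down to 4
 * are zeroed.  Rules 0 and 1 were programmed above, rules 2 and 3
 * are intentionally left alone (note the commented-out stores), and
 * when ASF is enabled the top four rules are left for the firmware.
 */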
7030 switch (limit) {
7031 case 16:
7032 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7033 case 15:
7034 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7035 case 14:
7036 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7037 case 13:
7038 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7039 case 12:
7040 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7041 case 11:
7042 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7043 case 10:
7044 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7045 case 9:
7046 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7047 case 8:
7048 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7049 case 7:
7050 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7051 case 6:
7052 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7053 case 5:
7054 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7055 case 4:
7056 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7057 case 3:
7058 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7059 case 2:
7060 case 1:
7061
7062 default:
7063 break;
7064 }
7065
7066 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7067 /* Write our heartbeat update interval to APE. */
7068 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7069 APE_HOST_HEARTBEAT_INT_DISABLE);
7070
7071 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7072
7073 return 0;
7074 }
7075
7076 /* Called at device open time to get the chip ready for
7077 * packet processing. Invoked with tp->lock held.
7078 */
7079 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7080 {
7081 int err;
7082
7083 /* Force the chip into D0. */
7084 err = tg3_set_power_state(tp, PCI_D0);
7085 if (err)
7086 goto out;
7087
7088 tg3_switch_clocks(tp);
7089
7090 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7091
7092 err = tg3_reset_hw(tp, reset_phy);
7093
7094 out:
7095 return err;
7096 }
7097
7098 #define TG3_STAT_ADD32(PSTAT, REG) \
7099 do { u32 __val = tr32(REG); \
7100 (PSTAT)->low += __val; \
7101 if ((PSTAT)->low < __val) \
7102 (PSTAT)->high += 1; \
7103 } while (0)
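/* Illustrative only: the macro folds a 32-bit hardware counter into
 * a 64-bit software counter, detecting the carry by unsigned
 * wraparound: if low + __val wraps, the new low is smaller than
 * __val.  A stand-alone sketch of the same idea:
 */
#if 0
static void stat64_add32(tg3_stat64_t *stat, u32 val)
{
	stat->low += val;
	if (stat->low < val)	/* unsigned wrap => carry out */
		stat->high += 1;
}
#endif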
7104
7105 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7106 {
7107 struct tg3_hw_stats *sp = tp->hw_stats;
7108
7109 if (!netif_carrier_ok(tp->dev))
7110 return;
7111
7112 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7113 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7114 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7115 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7116 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7117 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7118 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7119 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7120 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7121 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7122 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7123 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7124 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7125
7126 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7127 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7128 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7129 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7130 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7131 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7132 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7133 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7134 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7135 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7136 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7137 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7138 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7139 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7140
7141 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7142 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7143 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
7144 }
7145
7146 static void tg3_timer(unsigned long __opaque)
7147 {
7148 struct tg3 *tp = (struct tg3 *) __opaque;
7149
7150 if (tp->irq_sync)
7151 goto restart_timer;
7152
7153 spin_lock(&tp->lock);
7154
7155 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7156 /* All of this garbage is because, when using non-tagged
7157 * IRQ status, the mailbox/status_block protocol the chip
7158 * uses with the CPU is race prone.
7159 */
7160 if (tp->hw_status->status & SD_STATUS_UPDATED) {
7161 tw32(GRC_LOCAL_CTRL,
7162 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7163 } else {
7164 tw32(HOSTCC_MODE, tp->coalesce_mode |
7165 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
7166 }
7167
7168 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7169 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7170 spin_unlock(&tp->lock);
7171 schedule_work(&tp->reset_task);
7172 return;
7173 }
7174 }
7175
7176 /* This part only runs once per second. */
7177 if (!--tp->timer_counter) {
7178 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7179 tg3_periodic_fetch_stats(tp);
7180
7181 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7182 u32 mac_stat;
7183 int phy_event;
7184
7185 mac_stat = tr32(MAC_STATUS);
7186
7187 phy_event = 0;
7188 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7189 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7190 phy_event = 1;
7191 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7192 phy_event = 1;
7193
7194 if (phy_event)
7195 tg3_setup_phy(tp, 0);
7196 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7197 u32 mac_stat = tr32(MAC_STATUS);
7198 int need_setup = 0;
7199
7200 if (netif_carrier_ok(tp->dev) &&
7201 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7202 need_setup = 1;
7203 }
7204 if (! netif_carrier_ok(tp->dev) &&
7205 (mac_stat & (MAC_STATUS_PCS_SYNCED |
7206 MAC_STATUS_SIGNAL_DET))) {
7207 need_setup = 1;
7208 }
7209 if (need_setup) {
7210 if (!tp->serdes_counter) {
7211 tw32_f(MAC_MODE,
7212 (tp->mac_mode &
7213 ~MAC_MODE_PORT_MODE_MASK));
7214 udelay(40);
7215 tw32_f(MAC_MODE, tp->mac_mode);
7216 udelay(40);
7217 }
7218 tg3_setup_phy(tp, 0);
7219 }
7220 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7221 tg3_serdes_parallel_detect(tp);
7222
7223 tp->timer_counter = tp->timer_multiplier;
7224 }
7225
7226 /* Heartbeat is only sent once every 2 seconds.
7227 *
7228 * The heartbeat is to tell the ASF firmware that the host
7229 * driver is still alive. In the event that the OS crashes,
7230 * ASF needs to reset the hardware to free up the FIFO space
7231 * that may be filled with rx packets destined for the host.
7232 * If the FIFO is full, ASF will no longer function properly.
7233 *
7234 * Unintended resets have been reported on real-time kernels,
7235 * where the timer doesn't run on time.  Netpoll will also have
7236 * the same problem.
7237 *
7238 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7239 * to check the ring condition when the heartbeat is expiring
7240 * before doing the reset. This will prevent most unintended
7241 * resets.
7242 */
7243 if (!--tp->asf_counter) {
7244 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7245 u32 val;
7246
7247 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7248 FWCMD_NICDRV_ALIVE3);
7249 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7250 /* 5-second timeout */
7251 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7252 val = tr32(GRC_RX_CPU_EVENT);
7253 val |= (1 << 14);
7254 tw32(GRC_RX_CPU_EVENT, val);
7255 }
7256 tp->asf_counter = tp->asf_multiplier;
7257 }
7258
7259 spin_unlock(&tp->lock);
7260
7261 restart_timer:
7262 tp->timer.expires = jiffies + tp->timer_offset;
7263 add_timer(&tp->timer);
7264 }
7265
7266 static int tg3_request_irq(struct tg3 *tp)
7267 {
7268 irq_handler_t fn;
7269 unsigned long flags;
7270 struct net_device *dev = tp->dev;
7271
7272 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7273 fn = tg3_msi;
7274 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7275 fn = tg3_msi_1shot;
7276 flags = IRQF_SAMPLE_RANDOM;
7277 } else {
7278 fn = tg3_interrupt;
7279 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7280 fn = tg3_interrupt_tagged;
7281 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7282 }
7283 return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7284 }
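/* Handler selection above, for reference:
 *
 *	MSI, 1-shot capable	-> tg3_msi_1shot
 *	MSI			-> tg3_msi
 *	INTx, tagged status	-> tg3_interrupt_tagged	(shared line)
 *	INTx			-> tg3_interrupt	(shared line)
 */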
7285
7286 static int tg3_test_interrupt(struct tg3 *tp)
7287 {
7288 struct net_device *dev = tp->dev;
7289 int err, i, intr_ok = 0;
7290
7291 if (!netif_running(dev))
7292 return -ENODEV;
7293
7294 tg3_disable_ints(tp);
7295
7296 free_irq(tp->pdev->irq, dev);
7297
7298 err = request_irq(tp->pdev->irq, tg3_test_isr,
7299 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
7300 if (err)
7301 return err;
7302
7303 tp->hw_status->status &= ~SD_STATUS_UPDATED;
7304 tg3_enable_ints(tp);
7305
7306 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
7307 HOSTCC_MODE_NOW);
7308
7309 for (i = 0; i < 5; i++) {
7310 u32 int_mbox, misc_host_ctrl;
7311
7312 int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
7313 TG3_64BIT_REG_LOW);
7314 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
7315
7316 if ((int_mbox != 0) ||
7317 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
7318 intr_ok = 1;
7319 break;
7320 }
7321
7322 msleep(10);
7323 }
7324
7325 tg3_disable_ints(tp);
7326
7327 free_irq(tp->pdev->irq, dev);
7328
7329 err = tg3_request_irq(tp);
7330
7331 if (err)
7332 return err;
7333
7334 if (intr_ok)
7335 return 0;
7336
7337 return -EIO;
7338 }
7339
7340 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
7341 * INTx mode is successfully restored.
7342 */
7343 static int tg3_test_msi(struct tg3 *tp)
7344 {
7345 struct net_device *dev = tp->dev;
7346 int err;
7347 u16 pci_cmd;
7348
7349 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
7350 return 0;
7351
7352 /* Turn off SERR reporting in case MSI terminates with Master
7353 * Abort.
7354 */
7355 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7356 pci_write_config_word(tp->pdev, PCI_COMMAND,
7357 pci_cmd & ~PCI_COMMAND_SERR);
7358
7359 err = tg3_test_interrupt(tp);
7360
7361 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7362
7363 if (!err)
7364 return 0;
7365
7366 /* other failures */
7367 if (err != -EIO)
7368 return err;
7369
7370 /* MSI test failed, go back to INTx mode */
7371 printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
7372 "switching to INTx mode. Please report this failure to "
7373 "the PCI maintainer and include system chipset information.\n",
7374 tp->dev->name);
7375
7376 free_irq(tp->pdev->irq, dev);
7377 pci_disable_msi(tp->pdev);
7378
7379 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7380
7381 err = tg3_request_irq(tp);
7382 if (err)
7383 return err;
7384
7385 /* Need to reset the chip because the MSI cycle may have terminated
7386 * with Master Abort.
7387 */
7388 tg3_full_lock(tp, 1);
7389
7390 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7391 err = tg3_init_hw(tp, 1);
7392
7393 tg3_full_unlock(tp);
7394
7395 if (err)
7396 free_irq(tp->pdev->irq, dev);
7397
7398 return err;
7399 }
7400
7401 static int tg3_open(struct net_device *dev)
7402 {
7403 struct tg3 *tp = netdev_priv(dev);
7404 int err;
7405
7406 netif_carrier_off(tp->dev);
7407
7408 tg3_full_lock(tp, 0);
7409
7410 err = tg3_set_power_state(tp, PCI_D0);
7411 if (err) {
7412 tg3_full_unlock(tp);
7413 return err;
7414 }
7415
7416 tg3_disable_ints(tp);
7417 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7418
7419 tg3_full_unlock(tp);
7420
7421 /* The placement of this call is tied
7422 * to the setup and use of Host TX descriptors.
7423 */
7424 err = tg3_alloc_consistent(tp);
7425 if (err)
7426 return err;
7427
7428 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
7429 /* All MSI-supporting chips should support tagged
7430 * status. Assert that this is the case.
7431 */
7432 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7433 printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
7434 "Not using MSI.\n", tp->dev->name);
7435 } else if (pci_enable_msi(tp->pdev) == 0) {
7436 u32 msi_mode;
7437
7438 msi_mode = tr32(MSGINT_MODE);
7439 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
7440 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
7441 }
7442 }
7443 err = tg3_request_irq(tp);
7444
7445 if (err) {
7446 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7447 pci_disable_msi(tp->pdev);
7448 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7449 }
7450 tg3_free_consistent(tp);
7451 return err;
7452 }
7453
7454 napi_enable(&tp->napi);
7455
7456 tg3_full_lock(tp, 0);
7457
7458 err = tg3_init_hw(tp, 1);
7459 if (err) {
7460 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7461 tg3_free_rings(tp);
7462 } else {
7463 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7464 tp->timer_offset = HZ;
7465 else
7466 tp->timer_offset = HZ / 10;
7467
7468 BUG_ON(tp->timer_offset > HZ);
7469 tp->timer_counter = tp->timer_multiplier =
7470 (HZ / tp->timer_offset);
7471 tp->asf_counter = tp->asf_multiplier =
7472 ((HZ / tp->timer_offset) * 2);
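/* With tagged status, timer_offset = HZ, so the timer fires
 * once a second and timer_counter = 1; otherwise it fires
 * every HZ/10 jiffies and timer_counter = 10.  Either way
 * the once-per-second block in tg3_timer() runs once a
 * second and asf_counter spans two seconds.
 */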
7473
7474 init_timer(&tp->timer);
7475 tp->timer.expires = jiffies + tp->timer_offset;
7476 tp->timer.data = (unsigned long) tp;
7477 tp->timer.function = tg3_timer;
7478 }
7479
7480 tg3_full_unlock(tp);
7481
7482 if (err) {
7483 napi_disable(&tp->napi);
7484 free_irq(tp->pdev->irq, dev);
7485 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7486 pci_disable_msi(tp->pdev);
7487 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7488 }
7489 tg3_free_consistent(tp);
7490 return err;
7491 }
7492
7493 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7494 err = tg3_test_msi(tp);
7495
7496 if (err) {
7497 tg3_full_lock(tp, 0);
7498
7499 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7500 pci_disable_msi(tp->pdev);
7501 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7502 }
7503 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7504 tg3_free_rings(tp);
7505 tg3_free_consistent(tp);
7506
7507 tg3_full_unlock(tp);
7508
7509 napi_disable(&tp->napi);
7510
7511 return err;
7512 }
7513
7514 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7515 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
7516 u32 val = tr32(PCIE_TRANSACTION_CFG);
7517
7518 tw32(PCIE_TRANSACTION_CFG,
7519 val | PCIE_TRANS_CFG_1SHOT_MSI);
7520 }
7521 }
7522 }
7523
7524 tg3_full_lock(tp, 0);
7525
7526 add_timer(&tp->timer);
7527 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
7528 tg3_enable_ints(tp);
7529
7530 tg3_full_unlock(tp);
7531
7532 netif_start_queue(dev);
7533
7534 return 0;
7535 }
7536
7537 #if 0
7538 /*static*/ void tg3_dump_state(struct tg3 *tp)
7539 {
7540 u32 val32, val32_2, val32_3, val32_4, val32_5;
7541 u16 val16;
7542 int i;
7543
7544 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
7545 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
7546 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
7547 val16, val32);
7548
7549 /* MAC block */
7550 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
7551 tr32(MAC_MODE), tr32(MAC_STATUS));
7552 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
7553 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
7554 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
7555 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
7556 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
7557 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
7558
7559 /* Send data initiator control block */
7560 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
7561 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
7562 printk(" SNDDATAI_STATSCTRL[%08x]\n",
7563 tr32(SNDDATAI_STATSCTRL));
7564
7565 /* Send data completion control block */
7566 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
7567
7568 /* Send BD ring selector block */
7569 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
7570 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
7571
7572 /* Send BD initiator control block */
7573 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
7574 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
7575
7576 /* Send BD completion control block */
7577 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
7578
7579 /* Receive list placement control block */
7580 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
7581 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
7582 printk(" RCVLPC_STATSCTRL[%08x]\n",
7583 tr32(RCVLPC_STATSCTRL));
7584
7585 /* Receive data and receive BD initiator control block */
7586 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
7587 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
7588
7589 /* Receive data completion control block */
7590 printk("DEBUG: RCVDCC_MODE[%08x]\n",
7591 tr32(RCVDCC_MODE));
7592
7593 /* Receive BD initiator control block */
7594 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
7595 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
7596
7597 /* Receive BD completion control block */
7598 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
7599 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
7600
7601 /* Receive list selector control block */
7602 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
7603 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
7604
7605 /* Mbuf cluster free block */
7606 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
7607 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
7608
7609 /* Host coalescing control block */
7610 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
7611 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
7612 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
7613 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7614 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7615 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
7616 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
7617 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
7618 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
7619 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
7620 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
7621 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
7622
7623 /* Memory arbiter control block */
7624 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
7625 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
7626
7627 /* Buffer manager control block */
7628 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
7629 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
7630 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
7631 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
7632 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
7633 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
7634 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
7635 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
7636
7637 /* Read DMA control block */
7638 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
7639 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
7640
7641 /* Write DMA control block */
7642 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
7643 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
7644
7645 /* DMA completion block */
7646 printk("DEBUG: DMAC_MODE[%08x]\n",
7647 tr32(DMAC_MODE));
7648
7649 /* GRC block */
7650 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
7651 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
7652 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
7653 tr32(GRC_LOCAL_CTRL));
7654
7655 /* TG3_BDINFOs */
7656 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
7657 tr32(RCVDBDI_JUMBO_BD + 0x0),
7658 tr32(RCVDBDI_JUMBO_BD + 0x4),
7659 tr32(RCVDBDI_JUMBO_BD + 0x8),
7660 tr32(RCVDBDI_JUMBO_BD + 0xc));
7661 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
7662 tr32(RCVDBDI_STD_BD + 0x0),
7663 tr32(RCVDBDI_STD_BD + 0x4),
7664 tr32(RCVDBDI_STD_BD + 0x8),
7665 tr32(RCVDBDI_STD_BD + 0xc));
7666 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
7667 tr32(RCVDBDI_MINI_BD + 0x0),
7668 tr32(RCVDBDI_MINI_BD + 0x4),
7669 tr32(RCVDBDI_MINI_BD + 0x8),
7670 tr32(RCVDBDI_MINI_BD + 0xc));
7671
7672 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
7673 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
7674 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
7675 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
7676 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
7677 val32, val32_2, val32_3, val32_4);
7678
7679 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
7680 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
7681 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
7682 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
7683 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
7684 val32, val32_2, val32_3, val32_4);
7685
7686 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
7687 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
7688 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
7689 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
7690 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
7691 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
7692 val32, val32_2, val32_3, val32_4, val32_5);
7693
7694 /* SW status block */
7695 printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
7696 tp->hw_status->status,
7697 tp->hw_status->status_tag,
7698 tp->hw_status->rx_jumbo_consumer,
7699 tp->hw_status->rx_consumer,
7700 tp->hw_status->rx_mini_consumer,
7701 tp->hw_status->idx[0].rx_producer,
7702 tp->hw_status->idx[0].tx_consumer);
7703
7704 /* SW statistics block */
7705 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
7706 ((u32 *)tp->hw_stats)[0],
7707 ((u32 *)tp->hw_stats)[1],
7708 ((u32 *)tp->hw_stats)[2],
7709 ((u32 *)tp->hw_stats)[3]);
7710
7711 /* Mailboxes */
7712 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
7713 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
7714 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
7715 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
7716 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
7717
7718 /* NIC side send descriptors. */
7719 for (i = 0; i < 6; i++) {
7720 unsigned long txd;
7721
7722 txd = (unsigned long) tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
7723 + (i * sizeof(struct tg3_tx_buffer_desc));
7724 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
7725 i,
7726 readl(txd + 0x0), readl(txd + 0x4),
7727 readl(txd + 0x8), readl(txd + 0xc));
7728 }
7729
7730 /* NIC side RX descriptors. */
7731 for (i = 0; i < 6; i++) {
7732 unsigned long rxd;
7733
7734 rxd = (unsigned long) tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
7735 + (i * sizeof(struct tg3_rx_buffer_desc));
7736 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
7737 i,
7738 readl(rxd + 0x0), readl(rxd + 0x4),
7739 readl(rxd + 0x8), readl(rxd + 0xc));
7740 rxd += (4 * sizeof(u32));
7741 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
7742 i,
7743 readl(rxd + 0x0), readl(rxd + 0x4),
7744 readl(rxd + 0x8), readl(rxd + 0xc));
7745 }
7746
7747 for (i = 0; i < 6; i++) {
7748 unsigned long rxd;
7749
7750 rxd = (unsigned long) tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
7751 + (i * sizeof(struct tg3_rx_buffer_desc));
7752 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
7753 i,
7754 readl(rxd + 0x0), readl(rxd + 0x4),
7755 readl(rxd + 0x8), readl(rxd + 0xc));
7756 rxd += (4 * sizeof(u32));
7757 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
7758 i,
7759 readl(rxd + 0x0), readl(rxd + 0x4),
7760 readl(rxd + 0x8), readl(rxd + 0xc));
7761 }
7762 }
7763 #endif
7764
7765 static struct net_device_stats *tg3_get_stats(struct net_device *);
7766 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7767
7768 static int tg3_close(struct net_device *dev)
7769 {
7770 struct tg3 *tp = netdev_priv(dev);
7771
7772 napi_disable(&tp->napi);
7773 cancel_work_sync(&tp->reset_task);
7774
7775 netif_stop_queue(dev);
7776
7777 del_timer_sync(&tp->timer);
7778
7779 tg3_full_lock(tp, 1);
7780 #if 0
7781 tg3_dump_state(tp);
7782 #endif
7783
7784 tg3_disable_ints(tp);
7785
7786 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7787 tg3_free_rings(tp);
7788 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
7789
7790 tg3_full_unlock(tp);
7791
7792 free_irq(tp->pdev->irq, dev);
7793 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7794 pci_disable_msi(tp->pdev);
7795 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
7796 }
7797
7798 memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
7799 sizeof(tp->net_stats_prev));
7800 memcpy(&tp->estats_prev, tg3_get_estats(tp),
7801 sizeof(tp->estats_prev));
7802
7803 tg3_free_consistent(tp);
7804
7805 tg3_set_power_state(tp, PCI_D3hot);
7806
7807 netif_carrier_off(tp->dev);
7808
7809 return 0;
7810 }
7811
7812 static inline unsigned long get_stat64(tg3_stat64_t *val)
7813 {
7814 unsigned long ret;
7815
7816 #if (BITS_PER_LONG == 32)
7817 ret = val->low;
7818 #else
7819 ret = ((u64)val->high << 32) | ((u64)val->low);
7820 #endif
7821 return ret;
7822 }
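/* On 32-bit kernels the counters in struct net_device_stats are
 * 32-bit unsigned longs, so only the low word can be returned;
 * 64-bit kernels get the full 64-bit value.
 */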
7823
7824 static unsigned long calc_crc_errors(struct tg3 *tp)
7825 {
7826 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7827
7828 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7829 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7830 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
7831 u32 val;
7832
7833 spin_lock_bh(&tp->lock);
7834 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
7835 tg3_writephy(tp, MII_TG3_TEST1,
7836 val | MII_TG3_TEST1_CRC_EN);
7837 tg3_readphy(tp, 0x14, &val);
7838 } else
7839 val = 0;
7840 spin_unlock_bh(&tp->lock);
7841
7842 tp->phy_crc_errors += val;
7843
7844 return tp->phy_crc_errors;
7845 }
7846
7847 return get_stat64(&hw_stats->rx_fcs_errors);
7848 }
7849
7850 #define ESTAT_ADD(member) \
7851 estats->member = old_estats->member + \
7852 get_stat64(&hw_stats->member)
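/* For example, ESTAT_ADD(rx_octets) expands to
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *		get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the snapshot saved at close time plus
 * the current value from the hardware statistics block.
 */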
7853
7854 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
7855 {
7856 struct tg3_ethtool_stats *estats = &tp->estats;
7857 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
7858 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7859
7860 if (!hw_stats)
7861 return old_estats;
7862
7863 ESTAT_ADD(rx_octets);
7864 ESTAT_ADD(rx_fragments);
7865 ESTAT_ADD(rx_ucast_packets);
7866 ESTAT_ADD(rx_mcast_packets);
7867 ESTAT_ADD(rx_bcast_packets);
7868 ESTAT_ADD(rx_fcs_errors);
7869 ESTAT_ADD(rx_align_errors);
7870 ESTAT_ADD(rx_xon_pause_rcvd);
7871 ESTAT_ADD(rx_xoff_pause_rcvd);
7872 ESTAT_ADD(rx_mac_ctrl_rcvd);
7873 ESTAT_ADD(rx_xoff_entered);
7874 ESTAT_ADD(rx_frame_too_long_errors);
7875 ESTAT_ADD(rx_jabbers);
7876 ESTAT_ADD(rx_undersize_packets);
7877 ESTAT_ADD(rx_in_length_errors);
7878 ESTAT_ADD(rx_out_length_errors);
7879 ESTAT_ADD(rx_64_or_less_octet_packets);
7880 ESTAT_ADD(rx_65_to_127_octet_packets);
7881 ESTAT_ADD(rx_128_to_255_octet_packets);
7882 ESTAT_ADD(rx_256_to_511_octet_packets);
7883 ESTAT_ADD(rx_512_to_1023_octet_packets);
7884 ESTAT_ADD(rx_1024_to_1522_octet_packets);
7885 ESTAT_ADD(rx_1523_to_2047_octet_packets);
7886 ESTAT_ADD(rx_2048_to_4095_octet_packets);
7887 ESTAT_ADD(rx_4096_to_8191_octet_packets);
7888 ESTAT_ADD(rx_8192_to_9022_octet_packets);
7889
7890 ESTAT_ADD(tx_octets);
7891 ESTAT_ADD(tx_collisions);
7892 ESTAT_ADD(tx_xon_sent);
7893 ESTAT_ADD(tx_xoff_sent);
7894 ESTAT_ADD(tx_flow_control);
7895 ESTAT_ADD(tx_mac_errors);
7896 ESTAT_ADD(tx_single_collisions);
7897 ESTAT_ADD(tx_mult_collisions);
7898 ESTAT_ADD(tx_deferred);
7899 ESTAT_ADD(tx_excessive_collisions);
7900 ESTAT_ADD(tx_late_collisions);
7901 ESTAT_ADD(tx_collide_2times);
7902 ESTAT_ADD(tx_collide_3times);
7903 ESTAT_ADD(tx_collide_4times);
7904 ESTAT_ADD(tx_collide_5times);
7905 ESTAT_ADD(tx_collide_6times);
7906 ESTAT_ADD(tx_collide_7times);
7907 ESTAT_ADD(tx_collide_8times);
7908 ESTAT_ADD(tx_collide_9times);
7909 ESTAT_ADD(tx_collide_10times);
7910 ESTAT_ADD(tx_collide_11times);
7911 ESTAT_ADD(tx_collide_12times);
7912 ESTAT_ADD(tx_collide_13times);
7913 ESTAT_ADD(tx_collide_14times);
7914 ESTAT_ADD(tx_collide_15times);
7915 ESTAT_ADD(tx_ucast_packets);
7916 ESTAT_ADD(tx_mcast_packets);
7917 ESTAT_ADD(tx_bcast_packets);
7918 ESTAT_ADD(tx_carrier_sense_errors);
7919 ESTAT_ADD(tx_discards);
7920 ESTAT_ADD(tx_errors);
7921
7922 ESTAT_ADD(dma_writeq_full);
7923 ESTAT_ADD(dma_write_prioq_full);
7924 ESTAT_ADD(rxbds_empty);
7925 ESTAT_ADD(rx_discards);
7926 ESTAT_ADD(rx_errors);
7927 ESTAT_ADD(rx_threshold_hit);
7928
7929 ESTAT_ADD(dma_readq_full);
7930 ESTAT_ADD(dma_read_prioq_full);
7931 ESTAT_ADD(tx_comp_queue_full);
7932
7933 ESTAT_ADD(ring_set_send_prod_index);
7934 ESTAT_ADD(ring_status_update);
7935 ESTAT_ADD(nic_irqs);
7936 ESTAT_ADD(nic_avoided_irqs);
7937 ESTAT_ADD(nic_tx_threshold_hit);
7938
7939 return estats;
7940 }
7941
7942 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
7943 {
7944 struct tg3 *tp = netdev_priv(dev);
7945 struct net_device_stats *stats = &tp->net_stats;
7946 struct net_device_stats *old_stats = &tp->net_stats_prev;
7947 struct tg3_hw_stats *hw_stats = tp->hw_stats;
7948
7949 if (!hw_stats)
7950 return old_stats;
7951
7952 stats->rx_packets = old_stats->rx_packets +
7953 get_stat64(&hw_stats->rx_ucast_packets) +
7954 get_stat64(&hw_stats->rx_mcast_packets) +
7955 get_stat64(&hw_stats->rx_bcast_packets);
7956
7957 stats->tx_packets = old_stats->tx_packets +
7958 get_stat64(&hw_stats->tx_ucast_packets) +
7959 get_stat64(&hw_stats->tx_mcast_packets) +
7960 get_stat64(&hw_stats->tx_bcast_packets);
7961
7962 stats->rx_bytes = old_stats->rx_bytes +
7963 get_stat64(&hw_stats->rx_octets);
7964 stats->tx_bytes = old_stats->tx_bytes +
7965 get_stat64(&hw_stats->tx_octets);
7966
7967 stats->rx_errors = old_stats->rx_errors +
7968 get_stat64(&hw_stats->rx_errors);
7969 stats->tx_errors = old_stats->tx_errors +
7970 get_stat64(&hw_stats->tx_errors) +
7971 get_stat64(&hw_stats->tx_mac_errors) +
7972 get_stat64(&hw_stats->tx_carrier_sense_errors) +
7973 get_stat64(&hw_stats->tx_discards);
7974
7975 stats->multicast = old_stats->multicast +
7976 get_stat64(&hw_stats->rx_mcast_packets);
7977 stats->collisions = old_stats->collisions +
7978 get_stat64(&hw_stats->tx_collisions);
7979
7980 stats->rx_length_errors = old_stats->rx_length_errors +
7981 get_stat64(&hw_stats->rx_frame_too_long_errors) +
7982 get_stat64(&hw_stats->rx_undersize_packets);
7983
7984 stats->rx_over_errors = old_stats->rx_over_errors +
7985 get_stat64(&hw_stats->rxbds_empty);
7986 stats->rx_frame_errors = old_stats->rx_frame_errors +
7987 get_stat64(&hw_stats->rx_align_errors);
7988 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
7989 get_stat64(&hw_stats->tx_discards);
7990 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
7991 get_stat64(&hw_stats->tx_carrier_sense_errors);
7992
7993 stats->rx_crc_errors = old_stats->rx_crc_errors +
7994 calc_crc_errors(tp);
7995
7996 stats->rx_missed_errors = old_stats->rx_missed_errors +
7997 get_stat64(&hw_stats->rx_discards);
7998
7999 return stats;
8000 }
8001
8002 static inline u32 calc_crc(unsigned char *buf, int len)
8003 {
8004 u32 reg;
8005 u32 tmp;
8006 int j, k;
8007
8008 reg = 0xffffffff;
8009
8010 for (j = 0; j < len; j++) {
8011 reg ^= buf[j];
8012
8013 for (k = 0; k < 8; k++) {
8014 tmp = reg & 0x01;
8015
8016 reg >>= 1;
8017
8018 if (tmp) {
8019 reg ^= 0xedb88320;
8020 }
8021 }
8022 }
8023
8024 return ~reg;
8025 }
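/* calc_crc() above is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320) used by Ethernet.  A sketch (illustrative only, not
 * part of the driver) of how __tg3_set_rx_mode() below maps it onto
 * the 128-bit multicast hash filter:
 */
#if 0
static void example_hash_addr(unsigned char *addr, u32 mc_filter[4])
{
	u32 crc = calc_crc(addr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;			/* low 7 bits, inverted */
	u32 regidx = (bit & 0x60) >> 5;		/* MAC_HASH_REG_0..3 */

	mc_filter[regidx] |= 1 << (bit & 0x1f);	/* bit within register */
}
#endif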
8026
8027 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8028 {
8029 /* accept or reject all multicast frames */
8030 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8031 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8032 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8033 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8034 }
8035
8036 static void __tg3_set_rx_mode(struct net_device *dev)
8037 {
8038 struct tg3 *tp = netdev_priv(dev);
8039 u32 rx_mode;
8040
8041 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8042 RX_MODE_KEEP_VLAN_TAG);
8043
8044 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8045 * flag clear.
8046 */
8047 #if TG3_VLAN_TAG_USED
8048 if (!tp->vlgrp &&
8049 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8050 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8051 #else
8052 /* By definition, VLAN is always disabled in this
8053 * case.
8054 */
8055 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8056 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8057 #endif
8058
8059 if (dev->flags & IFF_PROMISC) {
8060 /* Promiscuous mode. */
8061 rx_mode |= RX_MODE_PROMISC;
8062 } else if (dev->flags & IFF_ALLMULTI) {
8063 /* Accept all multicast. */
8064 tg3_set_multi(tp, 1);
8065 } else if (dev->mc_count < 1) {
8066 /* Reject all multicast. */
8067 tg3_set_multi(tp, 0);
8068 } else {
8069 /* Accept one or more multicast(s). */
8070 struct dev_mc_list *mclist;
8071 unsigned int i;
8072 u32 mc_filter[4] = { 0, };
8073 u32 regidx;
8074 u32 bit;
8075 u32 crc;
8076
8077 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8078 i++, mclist = mclist->next) {
8079
8080 crc = calc_crc(mclist->dmi_addr, ETH_ALEN);
8081 bit = ~crc & 0x7f;
8082 regidx = (bit & 0x60) >> 5;
8083 bit &= 0x1f;
8084 mc_filter[regidx] |= (1 << bit);
8085 }
8086
8087 tw32(MAC_HASH_REG_0, mc_filter[0]);
8088 tw32(MAC_HASH_REG_1, mc_filter[1]);
8089 tw32(MAC_HASH_REG_2, mc_filter[2]);
8090 tw32(MAC_HASH_REG_3, mc_filter[3]);
8091 }
8092
8093 if (rx_mode != tp->rx_mode) {
8094 tp->rx_mode = rx_mode;
8095 tw32_f(MAC_RX_MODE, rx_mode);
8096 udelay(10);
8097 }
8098 }
8099
8100 static void tg3_set_rx_mode(struct net_device *dev)
8101 {
8102 struct tg3 *tp = netdev_priv(dev);
8103
8104 if (!netif_running(dev))
8105 return;
8106
8107 tg3_full_lock(tp, 0);
8108 __tg3_set_rx_mode(dev);
8109 tg3_full_unlock(tp);
8110 }
8111
8112 #define TG3_REGDUMP_LEN (32 * 1024)
8113
8114 static int tg3_get_regs_len(struct net_device *dev)
8115 {
8116 return TG3_REGDUMP_LEN;
8117 }
8118
8119 static void tg3_get_regs(struct net_device *dev,
8120 struct ethtool_regs *regs, void *_p)
8121 {
8122 u32 *p = _p;
8123 struct tg3 *tp = netdev_priv(dev);
8124 u8 *orig_p = _p;
8125 int i;
8126
8127 regs->version = 0;
8128
8129 memset(p, 0, TG3_REGDUMP_LEN);
8130
8131 if (tp->link_config.phy_is_low_power)
8132 return;
8133
8134 tg3_full_lock(tp, 0);
8135
8136 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
8137 #define GET_REG32_LOOP(base, len) \
8138 do { p = (u32 *)(orig_p + (base)); \
8139 for (i = 0; i < len; i += 4) \
8140 __GET_REG32((base) + i); \
8141 } while (0)
8142 #define GET_REG32_1(reg) \
8143 do { p = (u32 *)(orig_p + (reg)); \
8144 __GET_REG32((reg)); \
8145 } while (0)
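/* Both helpers first reposition 'p' to orig_p + register offset, so
 * every register value lands at its own offset within the dump
 * buffer; ranges that are never read keep the zeroes written by the
 * memset() above.
 */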
8146
8147 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8148 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8149 GET_REG32_LOOP(MAC_MODE, 0x4f0);
8150 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8151 GET_REG32_1(SNDDATAC_MODE);
8152 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8153 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8154 GET_REG32_1(SNDBDC_MODE);
8155 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8156 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8157 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8158 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8159 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8160 GET_REG32_1(RCVDCC_MODE);
8161 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8162 GET_REG32_LOOP(RCVCC_MODE, 0x14);
8163 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8164 GET_REG32_1(MBFREE_MODE);
8165 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8166 GET_REG32_LOOP(MEMARB_MODE, 0x10);
8167 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8168 GET_REG32_LOOP(RDMAC_MODE, 0x08);
8169 GET_REG32_LOOP(WDMAC_MODE, 0x08);
8170 GET_REG32_1(RX_CPU_MODE);
8171 GET_REG32_1(RX_CPU_STATE);
8172 GET_REG32_1(RX_CPU_PGMCTR);
8173 GET_REG32_1(RX_CPU_HWBKPT);
8174 GET_REG32_1(TX_CPU_MODE);
8175 GET_REG32_1(TX_CPU_STATE);
8176 GET_REG32_1(TX_CPU_PGMCTR);
8177 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8178 GET_REG32_LOOP(FTQ_RESET, 0x120);
8179 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8180 GET_REG32_1(DMAC_MODE);
8181 GET_REG32_LOOP(GRC_MODE, 0x4c);
8182 if (tp->tg3_flags & TG3_FLAG_NVRAM)
8183 GET_REG32_LOOP(NVRAM_CMD, 0x24);
8184
8185 #undef __GET_REG32
8186 #undef GET_REG32_LOOP
8187 #undef GET_REG32_1
8188
8189 tg3_full_unlock(tp);
8190 }
8191
8192 static int tg3_get_eeprom_len(struct net_device *dev)
8193 {
8194 struct tg3 *tp = netdev_priv(dev);
8195
8196 return tp->nvram_size;
8197 }
8198
8199 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8200 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8201 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
8202
8203 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8204 {
8205 struct tg3 *tp = netdev_priv(dev);
8206 int ret;
8207 u8 *pd;
8208 u32 i, offset, len, b_offset, b_count;
8209 __le32 val;
8210
8211 if (tp->link_config.phy_is_low_power)
8212 return -EAGAIN;
8213
8214 offset = eeprom->offset;
8215 len = eeprom->len;
8216 eeprom->len = 0;
8217
8218 eeprom->magic = TG3_EEPROM_MAGIC;
8219
8220 if (offset & 3) {
8221 /* adjustments to start on the required 4-byte boundary */
8222 b_offset = offset & 3;
8223 b_count = 4 - b_offset;
8224 if (b_count > len) {
8225 /* i.e. offset=1 len=2 */
8226 b_count = len;
8227 }
8228 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
8229 if (ret)
8230 return ret;
8231 memcpy(data, ((char*)&val) + b_offset, b_count);
8232 len -= b_count;
8233 offset += b_count;
8234 eeprom->len += b_count;
8235 }
8236
8237 /* read bytes up to the last 4-byte boundary */
8238 pd = &data[eeprom->len];
8239 for (i = 0; i < (len - (len & 3)); i += 4) {
8240 ret = tg3_nvram_read_le(tp, offset + i, &val);
8241 if (ret) {
8242 eeprom->len += i;
8243 return ret;
8244 }
8245 memcpy(pd + i, &val, 4);
8246 }
8247 eeprom->len += i;
8248
8249 if (len & 3) {
8250 /* read the last bytes, which do not end on a 4-byte boundary */
8251 pd = &data[eeprom->len];
8252 b_count = len & 3;
8253 b_offset = offset + len - b_count;
8254 ret = tg3_nvram_read_le(tp, b_offset, &val);
8255 if (ret)
8256 return ret;
8257 memcpy(pd, &val, b_count);
8258 eeprom->len += b_count;
8259 }
8260 return 0;
8261 }
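/* Worked example of the alignment handling above: offset=1, len=2
 * gives b_offset=1 and b_count=min(4 - 1, 2)=2, so the word at NVRAM
 * offset 0 is read and bytes 1..2 of it are copied out; len drops to
 * zero, and both the aligned middle loop and the odd tail are
 * skipped.
 */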
8262
8263 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8264
8265 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8266 {
8267 struct tg3 *tp = netdev_priv(dev);
8268 int ret;
8269 u32 offset, len, b_offset, odd_len;
8270 u8 *buf;
8271 __le32 start, end;
8272
8273 if (tp->link_config.phy_is_low_power)
8274 return -EAGAIN;
8275
8276 if (eeprom->magic != TG3_EEPROM_MAGIC)
8277 return -EINVAL;
8278
8279 offset = eeprom->offset;
8280 len = eeprom->len;
8281
8282 if ((b_offset = (offset & 3))) {
8283 /* adjustments to start on the required 4-byte boundary */
8284 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
8285 if (ret)
8286 return ret;
8287 len += b_offset;
8288 offset &= ~3;
8289 if (len < 4)
8290 len = 4;
8291 }
8292
8293 odd_len = 0;
8294 if (len & 3) {
8295 /* adjustments to end on the required 4-byte boundary */
8296 odd_len = 1;
8297 len = (len + 3) & ~3;
8298 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
8299 if (ret)
8300 return ret;
8301 }
8302
8303 buf = data;
8304 if (b_offset || odd_len) {
8305 buf = kmalloc(len, GFP_KERNEL);
8306 if (!buf)
8307 return -ENOMEM;
8308 if (b_offset)
8309 memcpy(buf, &start, 4);
8310 if (odd_len)
8311 memcpy(buf+len-4, &end, 4);
8312 memcpy(buf + b_offset, data, eeprom->len);
8313 }
8314
8315 ret = tg3_nvram_write_block(tp, offset, len, buf);
8316
8317 if (buf != data)
8318 kfree(buf);
8319
8320 return ret;
8321 }
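/* Worked example: offset=1, len=2 first reads the word at offset 0
 * into 'start', widens the request to offset=0, len=4, then overlays
 * the two caller bytes at buf+1 before writing the whole word back,
 * so the bytes the caller did not supply keep their old contents.
 */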
8322
8323 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8324 {
8325 struct tg3 *tp = netdev_priv(dev);
8326
8327 cmd->supported = (SUPPORTED_Autoneg);
8328
8329 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8330 cmd->supported |= (SUPPORTED_1000baseT_Half |
8331 SUPPORTED_1000baseT_Full);
8332
8333 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8334 cmd->supported |= (SUPPORTED_100baseT_Half |
8335 SUPPORTED_100baseT_Full |
8336 SUPPORTED_10baseT_Half |
8337 SUPPORTED_10baseT_Full |
8338 SUPPORTED_TP);
8339 cmd->port = PORT_TP;
8340 } else {
8341 cmd->supported |= SUPPORTED_FIBRE;
8342 cmd->port = PORT_FIBRE;
8343 }
8344
8345 cmd->advertising = tp->link_config.advertising;
8346 if (netif_running(dev)) {
8347 cmd->speed = tp->link_config.active_speed;
8348 cmd->duplex = tp->link_config.active_duplex;
8349 }
8350 cmd->phy_address = PHY_ADDR;
8351 cmd->transceiver = 0;
8352 cmd->autoneg = tp->link_config.autoneg;
8353 cmd->maxtxpkt = 0;
8354 cmd->maxrxpkt = 0;
8355 return 0;
8356 }
8357
8358 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8359 {
8360 struct tg3 *tp = netdev_priv(dev);
8361
8362 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8363 /* These are the only advertisement bits allowed. */
8364 if (cmd->autoneg == AUTONEG_ENABLE &&
8365 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
8366 ADVERTISED_1000baseT_Full |
8367 ADVERTISED_Autoneg |
8368 ADVERTISED_FIBRE)))
8369 return -EINVAL;
8370 /* Fiber can only do SPEED_1000. */
8371 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8372 (cmd->speed != SPEED_1000))
8373 return -EINVAL;
8374 /* Copper cannot force SPEED_1000. */
8375 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
8376 (cmd->speed == SPEED_1000))
8377 return -EINVAL;
8378 else if ((cmd->speed == SPEED_1000) &&
8379 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8380 return -EINVAL;
8381
8382 tg3_full_lock(tp, 0);
8383
8384 tp->link_config.autoneg = cmd->autoneg;
8385 if (cmd->autoneg == AUTONEG_ENABLE) {
8386 tp->link_config.advertising = (cmd->advertising |
8387 ADVERTISED_Autoneg);
8388 tp->link_config.speed = SPEED_INVALID;
8389 tp->link_config.duplex = DUPLEX_INVALID;
8390 } else {
8391 tp->link_config.advertising = 0;
8392 tp->link_config.speed = cmd->speed;
8393 tp->link_config.duplex = cmd->duplex;
8394 }
8395
8396 tp->link_config.orig_speed = tp->link_config.speed;
8397 tp->link_config.orig_duplex = tp->link_config.duplex;
8398 tp->link_config.orig_autoneg = tp->link_config.autoneg;
8399
8400 if (netif_running(dev))
8401 tg3_setup_phy(tp, 1);
8402
8403 tg3_full_unlock(tp);
8404
8405 return 0;
8406 }
8407
8408 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8409 {
8410 struct tg3 *tp = netdev_priv(dev);
8411
8412 strcpy(info->driver, DRV_MODULE_NAME);
8413 strcpy(info->version, DRV_MODULE_VERSION);
8414 strcpy(info->fw_version, tp->fw_ver);
8415 strcpy(info->bus_info, pci_name(tp->pdev));
8416 }
8417
8418 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8419 {
8420 struct tg3 *tp = netdev_priv(dev);
8421
8422 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
8423 wol->supported = WAKE_MAGIC;
8424 else
8425 wol->supported = 0;
8426 wol->wolopts = 0;
8427 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
8428 wol->wolopts = WAKE_MAGIC;
8429 memset(&wol->sopass, 0, sizeof(wol->sopass));
8430 }
8431
8432 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8433 {
8434 struct tg3 *tp = netdev_priv(dev);
8435
8436 if (wol->wolopts & ~WAKE_MAGIC)
8437 return -EINVAL;
8438 if ((wol->wolopts & WAKE_MAGIC) &&
8439 !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
8440 return -EINVAL;
8441
8442 spin_lock_bh(&tp->lock);
8443 if (wol->wolopts & WAKE_MAGIC)
8444 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8445 else
8446 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8447 spin_unlock_bh(&tp->lock);
8448
8449 return 0;
8450 }
8451
8452 static u32 tg3_get_msglevel(struct net_device *dev)
8453 {
8454 struct tg3 *tp = netdev_priv(dev);
8455 return tp->msg_enable;
8456 }
8457
8458 static void tg3_set_msglevel(struct net_device *dev, u32 value)
8459 {
8460 struct tg3 *tp = netdev_priv(dev);
8461 tp->msg_enable = value;
8462 }
8463
8464 static int tg3_set_tso(struct net_device *dev, u32 value)
8465 {
8466 struct tg3 *tp = netdev_priv(dev);
8467
8468 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8469 if (value)
8470 return -EINVAL;
8471 return 0;
8472 }
8473 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
8474 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
8475 if (value) {
8476 dev->features |= NETIF_F_TSO6;
8477 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8478 dev->features |= NETIF_F_TSO_ECN;
8479 } else
8480 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
8481 }
8482 return ethtool_op_set_tso(dev, value);
8483 }
8484
8485 static int tg3_nway_reset(struct net_device *dev)
8486 {
8487 struct tg3 *tp = netdev_priv(dev);
8488 u32 bmcr;
8489 int r;
8490
8491 if (!netif_running(dev))
8492 return -EAGAIN;
8493
8494 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8495 return -EINVAL;
8496
8497 spin_lock_bh(&tp->lock);
8498 r = -EINVAL;
8499 tg3_readphy(tp, MII_BMCR, &bmcr); /* result discarded; re-read below for a fresh value */
8500 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
8501 ((bmcr & BMCR_ANENABLE) ||
8502 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
8503 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
8504 BMCR_ANENABLE);
8505 r = 0;
8506 }
8507 spin_unlock_bh(&tp->lock);
8508
8509 return r;
8510 }
8511
8512 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8513 {
8514 struct tg3 *tp = netdev_priv(dev);
8515
8516 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8517 ering->rx_mini_max_pending = 0;
8518 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8519 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8520 else
8521 ering->rx_jumbo_max_pending = 0;
8522
8523 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8524
8525 ering->rx_pending = tp->rx_pending;
8526 ering->rx_mini_pending = 0;
8527 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8528 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8529 else
8530 ering->rx_jumbo_pending = 0;
8531
8532 ering->tx_pending = tp->tx_pending;
8533 }
8534
8535 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8536 {
8537 struct tg3 *tp = netdev_priv(dev);
8538 int irq_sync = 0, err = 0;
8539
8540 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
8541 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
8542 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
8543 (ering->tx_pending <= MAX_SKB_FRAGS) ||
8544 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
8545 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
8546 return -EINVAL;
8547
8548 if (netif_running(dev)) {
8549 tg3_netif_stop(tp);
8550 irq_sync = 1;
8551 }
8552
8553 tg3_full_lock(tp, irq_sync);
8554
8555 tp->rx_pending = ering->rx_pending;
8556
8557 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
8558 tp->rx_pending > 63)
8559 tp->rx_pending = 63;
8560 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
8561 tp->tx_pending = ering->tx_pending;
8562
8563 if (netif_running(dev)) {
8564 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8565 err = tg3_restart_hw(tp, 1);
8566 if (!err)
8567 tg3_netif_start(tp);
8568 }
8569
8570 tg3_full_unlock(tp);
8571
8572 return err;
8573 }
8574
8575 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8576 {
8577 struct tg3 *tp = netdev_priv(dev);
8578
8579 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
8580
8581 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
8582 epause->rx_pause = 1;
8583 else
8584 epause->rx_pause = 0;
8585
8586 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
8587 epause->tx_pause = 1;
8588 else
8589 epause->tx_pause = 0;
8590 }
8591
8592 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
8593 {
8594 struct tg3 *tp = netdev_priv(dev);
8595 int irq_sync = 0, err = 0;
8596
8597 if (netif_running(dev)) {
8598 tg3_netif_stop(tp);
8599 irq_sync = 1;
8600 }
8601
8602 tg3_full_lock(tp, irq_sync);
8603
8604 if (epause->autoneg)
8605 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8606 else
8607 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
8608 if (epause->rx_pause)
8609 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
8610 else
8611 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
8612 if (epause->tx_pause)
8613 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
8614 else
8615 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
8616
8617 if (netif_running(dev)) {
8618 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8619 err = tg3_restart_hw(tp, 1);
8620 if (!err)
8621 tg3_netif_start(tp);
8622 }
8623
8624 tg3_full_unlock(tp);
8625
8626 return err;
8627 }
8628
8629 static u32 tg3_get_rx_csum(struct net_device *dev)
8630 {
8631 struct tg3 *tp = netdev_priv(dev);
8632 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
8633 }
8634
8635 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
8636 {
8637 struct tg3 *tp = netdev_priv(dev);
8638
8639 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8640 if (data != 0)
8641 return -EINVAL;
8642 return 0;
8643 }
8644
8645 spin_lock_bh(&tp->lock);
8646 if (data)
8647 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8648 else
8649 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8650 spin_unlock_bh(&tp->lock);
8651
8652 return 0;
8653 }
8654
8655 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8656 {
8657 struct tg3 *tp = netdev_priv(dev);
8658
8659 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8660 if (data != 0)
8661 return -EINVAL;
8662 return 0;
8663 }
8664
8665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8666 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
8667 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8669 ethtool_op_set_tx_ipv6_csum(dev, data);
8670 else
8671 ethtool_op_set_tx_csum(dev, data);
8672
8673 return 0;
8674 }
8675
8676 static int tg3_get_sset_count (struct net_device *dev, int sset)
8677 {
8678 switch (sset) {
8679 case ETH_SS_TEST:
8680 return TG3_NUM_TEST;
8681 case ETH_SS_STATS:
8682 return TG3_NUM_STATS;
8683 default:
8684 return -EOPNOTSUPP;
8685 }
8686 }
8687
8688 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8689 {
8690 switch (stringset) {
8691 case ETH_SS_STATS:
8692 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8693 break;
8694 case ETH_SS_TEST:
8695 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8696 break;
8697 default:
8698 WARN_ON(1); /* we need a WARN() */
8699 break;
8700 }
8701 }
8702
8703 static int tg3_phys_id(struct net_device *dev, u32 data)
8704 {
8705 struct tg3 *tp = netdev_priv(dev);
8706 int i;
8707
8708 if (!netif_running(tp->dev))
8709 return -EAGAIN;
8710
8711 if (data == 0)
8712 data = 2;
8713
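/* Each pass through the loop toggles the LEDs every 500 ms, so one
 * unit of 'data' corresponds to one full second of blinking; a 'data'
 * of 0 is mapped to a 2 second default above.
 */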
8714 for (i = 0; i < (data * 2); i++) {
8715 if ((i % 2) == 0)
8716 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8717 LED_CTRL_1000MBPS_ON |
8718 LED_CTRL_100MBPS_ON |
8719 LED_CTRL_10MBPS_ON |
8720 LED_CTRL_TRAFFIC_OVERRIDE |
8721 LED_CTRL_TRAFFIC_BLINK |
8722 LED_CTRL_TRAFFIC_LED);
8723
8724 else
8725 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8726 LED_CTRL_TRAFFIC_OVERRIDE);
8727
8728 if (msleep_interruptible(500))
8729 break;
8730 }
8731 tw32(MAC_LED_CTRL, tp->led_ctrl);
8732 return 0;
8733 }
8734
8735 static void tg3_get_ethtool_stats (struct net_device *dev,
8736 struct ethtool_stats *estats, u64 *tmp_stats)
8737 {
8738 struct tg3 *tp = netdev_priv(dev);
8739 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
8740 }
8741
8742 #define NVRAM_TEST_SIZE 0x100
8743 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
8744 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
8745 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
8746 #define NVRAM_SELFBOOT_HW_SIZE 0x20
8747 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8748
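/* Verify the NVRAM image.  A sketch of the flow below:
 *   1) read the magic word to identify the image format,
 *   2) size the image accordingly and read it in whole,
 *   3) check the format-specific integrity data: a byte-wise checksum
 *      for selfboot (firmware) images, odd parity for selfboot HW
 *      images, and CRC32 checksums for legacy images.
 */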
8749 static int tg3_test_nvram(struct tg3 *tp)
8750 {
8751 u32 csum, magic;
8752 __le32 *buf;
8753 int i, j, k, err = 0, size;
8754
8755 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
8756 return -EIO;
8757
8758 if (magic == TG3_EEPROM_MAGIC)
8759 size = NVRAM_TEST_SIZE;
8760 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
8761 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
8762 TG3_EEPROM_SB_FORMAT_1) {
8763 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
8764 case TG3_EEPROM_SB_REVISION_0:
8765 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
8766 break;
8767 case TG3_EEPROM_SB_REVISION_2:
8768 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
8769 break;
8770 case TG3_EEPROM_SB_REVISION_3:
8771 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
8772 break;
8773 default:
8774 return 0;
8775 }
8776 } else
8777 return 0;
8778 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
8779 size = NVRAM_SELFBOOT_HW_SIZE;
8780 else
8781 return -EIO;
8782
8783 buf = kmalloc(size, GFP_KERNEL);
8784 if (buf == NULL)
8785 return -ENOMEM;
8786
8787 err = -EIO;
8788 for (i = 0, j = 0; i < size; i += 4, j++) {
8789 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
8790 break;
8791 }
8792 if (i < size)
8793 goto out;
8794
8795 /* Selfboot format */
8796 magic = swab32(le32_to_cpu(buf[0]));
8797 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
8798 TG3_EEPROM_MAGIC_FW) {
8799 u8 *buf8 = (u8 *) buf, csum8 = 0;
8800
8801 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
8802 TG3_EEPROM_SB_REVISION_2) {
8803 /* For rev 2, the csum doesn't include the MBA. */
8804 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
8805 csum8 += buf8[i];
8806 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
8807 csum8 += buf8[i];
8808 } else {
8809 for (i = 0; i < size; i++)
8810 csum8 += buf8[i];
8811 }
8812
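/* A valid image byte-sums to zero mod 256 -- presumably the stored
 * checksum byte is chosen to cancel out the sum of the other bytes.
 */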
8813 if (csum8 == 0) {
8814 err = 0;
8815 goto out;
8816 }
8817
8818 err = -EIO;
8819 goto out;
8820 }
8821
8822 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
8823 TG3_EEPROM_MAGIC_HW) {
8824 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
8825 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
8826 u8 *buf8 = (u8 *) buf;
8827
8828 /* Separate the parity bits and the data bytes. */
8829 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
8830 if ((i == 0) || (i == 8)) {
8831 int l;
8832 u8 msk;
8833
8834 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
8835 parity[k++] = buf8[i] & msk;
8836 i++;
8837 }
8838 else if (i == 16) {
8839 int l;
8840 u8 msk;
8841
8842 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
8843 parity[k++] = buf8[i] & msk;
8844 i++;
8845
8846 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
8847 parity[k++] = buf8[i] & msk;
8848 i++;
8849 }
8850 data[j++] = buf8[i];
8851 }
8852
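/* Check odd parity: each data byte combined with its parity bit must
 * contain an odd number of set bits, i.e. an odd hweight8() pairs
 * with a clear parity bit and an even one with a set bit.
 */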
8853 err = -EIO;
8854 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
8855 u8 hw8 = hweight8(data[i]);
8856
8857 if ((hw8 & 0x1) && parity[i])
8858 goto out;
8859 else if (!(hw8 & 0x1) && !parity[i])
8860 goto out;
8861 }
8862 err = 0;
8863 goto out;
8864 }
8865
8866 /* Bootstrap checksum at offset 0x10 */
8867 csum = calc_crc((unsigned char *) buf, 0x10);
8868 if (csum != le32_to_cpu(buf[0x10/4]))
8869 goto out;
8870
8871 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
8872 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
8873 if (csum != le32_to_cpu(buf[0xfc/4]))
8874 goto out;
8875
8876 err = 0;
8877
8878 out:
8879 kfree(buf);
8880 return err;
8881 }
8882
8883 #define TG3_SERDES_TIMEOUT_SEC 2
8884 #define TG3_COPPER_TIMEOUT_SEC 6
8885
8886 static int tg3_test_link(struct tg3 *tp)
8887 {
8888 int i, max;
8889
8890 if (!netif_running(tp->dev))
8891 return -ENODEV;
8892
8893 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8894 max = TG3_SERDES_TIMEOUT_SEC;
8895 else
8896 max = TG3_COPPER_TIMEOUT_SEC;
8897
8898 for (i = 0; i < max; i++) {
8899 if (netif_carrier_ok(tp->dev))
8900 return 0;
8901
8902 if (msleep_interruptible(1000))
8903 break;
8904 }
8905
8906 return -EIO;
8907 }
8908
8909 /* Only test the commonly used registers */
8910 static int tg3_test_registers(struct tg3 *tp)
8911 {
8912 int i, is_5705, is_5750;
8913 u32 offset, read_mask, write_mask, val, save_val, read_val;
8914 static struct {
8915 u16 offset;
8916 u16 flags;
8917 #define TG3_FL_5705 0x1
8918 #define TG3_FL_NOT_5705 0x2
8919 #define TG3_FL_NOT_5788 0x4
8920 #define TG3_FL_NOT_5750 0x8
8921 u32 read_mask;
8922 u32 write_mask;
8923 } reg_tbl[] = {
8924 /* MAC Control Registers */
8925 { MAC_MODE, TG3_FL_NOT_5705,
8926 0x00000000, 0x00ef6f8c },
8927 { MAC_MODE, TG3_FL_5705,
8928 0x00000000, 0x01ef6b8c },
8929 { MAC_STATUS, TG3_FL_NOT_5705,
8930 0x03800107, 0x00000000 },
8931 { MAC_STATUS, TG3_FL_5705,
8932 0x03800100, 0x00000000 },
8933 { MAC_ADDR_0_HIGH, 0x0000,
8934 0x00000000, 0x0000ffff },
8935 { MAC_ADDR_0_LOW, 0x0000,
8936 0x00000000, 0xffffffff },
8937 { MAC_RX_MTU_SIZE, 0x0000,
8938 0x00000000, 0x0000ffff },
8939 { MAC_TX_MODE, 0x0000,
8940 0x00000000, 0x00000070 },
8941 { MAC_TX_LENGTHS, 0x0000,
8942 0x00000000, 0x00003fff },
8943 { MAC_RX_MODE, TG3_FL_NOT_5705,
8944 0x00000000, 0x000007fc },
8945 { MAC_RX_MODE, TG3_FL_5705,
8946 0x00000000, 0x000007dc },
8947 { MAC_HASH_REG_0, 0x0000,
8948 0x00000000, 0xffffffff },
8949 { MAC_HASH_REG_1, 0x0000,
8950 0x00000000, 0xffffffff },
8951 { MAC_HASH_REG_2, 0x0000,
8952 0x00000000, 0xffffffff },
8953 { MAC_HASH_REG_3, 0x0000,
8954 0x00000000, 0xffffffff },
8955
8956 /* Receive Data and Receive BD Initiator Control Registers. */
8957 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8958 0x00000000, 0xffffffff },
8959 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8960 0x00000000, 0xffffffff },
8961 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8962 0x00000000, 0x00000003 },
8963 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8964 0x00000000, 0xffffffff },
8965 { RCVDBDI_STD_BD+0, 0x0000,
8966 0x00000000, 0xffffffff },
8967 { RCVDBDI_STD_BD+4, 0x0000,
8968 0x00000000, 0xffffffff },
8969 { RCVDBDI_STD_BD+8, 0x0000,
8970 0x00000000, 0xffff0002 },
8971 { RCVDBDI_STD_BD+0xc, 0x0000,
8972 0x00000000, 0xffffffff },
8973
8974 /* Receive BD Initiator Control Registers. */
8975 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8976 0x00000000, 0xffffffff },
8977 { RCVBDI_STD_THRESH, TG3_FL_5705,
8978 0x00000000, 0x000003ff },
8979 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8980 0x00000000, 0xffffffff },
8981
8982 /* Host Coalescing Control Registers. */
8983 { HOSTCC_MODE, TG3_FL_NOT_5705,
8984 0x00000000, 0x00000004 },
8985 { HOSTCC_MODE, TG3_FL_5705,
8986 0x00000000, 0x000000f6 },
8987 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8988 0x00000000, 0xffffffff },
8989 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8990 0x00000000, 0x000003ff },
8991 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8992 0x00000000, 0xffffffff },
8993 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8994 0x00000000, 0x000003ff },
8995 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8996 0x00000000, 0xffffffff },
8997 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8998 0x00000000, 0x000000ff },
8999 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9000 0x00000000, 0xffffffff },
9001 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9002 0x00000000, 0x000000ff },
9003 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9004 0x00000000, 0xffffffff },
9005 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9006 0x00000000, 0xffffffff },
9007 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9008 0x00000000, 0xffffffff },
9009 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9010 0x00000000, 0x000000ff },
9011 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9012 0x00000000, 0xffffffff },
9013 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9014 0x00000000, 0x000000ff },
9015 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9016 0x00000000, 0xffffffff },
9017 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9018 0x00000000, 0xffffffff },
9019 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9020 0x00000000, 0xffffffff },
9021 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9022 0x00000000, 0xffffffff },
9023 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9024 0x00000000, 0xffffffff },
9025 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9026 0xffffffff, 0x00000000 },
9027 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9028 0xffffffff, 0x00000000 },
9029
9030 /* Buffer Manager Control Registers. */
9031 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9032 0x00000000, 0x007fff80 },
9033 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9034 0x00000000, 0x007fffff },
9035 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9036 0x00000000, 0x0000003f },
9037 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9038 0x00000000, 0x000001ff },
9039 { BUFMGR_MB_HIGH_WATER, 0x0000,
9040 0x00000000, 0x000001ff },
9041 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9042 0xffffffff, 0x00000000 },
9043 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9044 0xffffffff, 0x00000000 },
9045
9046 /* Mailbox Registers */
9047 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9048 0x00000000, 0x000001ff },
9049 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9050 0x00000000, 0x000001ff },
9051 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9052 0x00000000, 0x000007ff },
9053 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9054 0x00000000, 0x000001ff },
9055
9056 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9057 };
9058
9059 is_5705 = is_5750 = 0;
9060 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9061 is_5705 = 1;
9062 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9063 is_5750 = 1;
9064 }
9065
9066 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9067 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9068 continue;
9069
9070 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9071 continue;
9072
9073 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9074 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9075 continue;
9076
9077 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9078 continue;
9079
9080 offset = (u32) reg_tbl[i].offset;
9081 read_mask = reg_tbl[i].read_mask;
9082 write_mask = reg_tbl[i].write_mask;
9083
9084 /* Save the original register content */
9085 save_val = tr32(offset);
9086
9087 /* Determine the read-only value. */
9088 read_val = save_val & read_mask;
9089
9090 /* Write zero to the register, then make sure the read-only bits
9091 * are not changed and the read/write bits are all zeros.
9092 */
9093 tw32(offset, 0);
9094
9095 val = tr32(offset);
9096
9097 /* Test the read-only and read/write bits. */
9098 if (((val & read_mask) != read_val) || (val & write_mask))
9099 goto out;
9100
9101 /* Write ones to all the bits defined by RdMask and WrMask, then
9102 * make sure the read-only bits are not changed and the
9103 * read/write bits are all ones.
9104 */
9105 tw32(offset, read_mask | write_mask);
9106
9107 val = tr32(offset);
9108
9109 /* Test the read-only bits. */
9110 if ((val & read_mask) != read_val)
9111 goto out;
9112
9113 /* Test the read/write bits. */
9114 if ((val & write_mask) != write_mask)
9115 goto out;
9116
9117 tw32(offset, save_val);
9118 }
9119
9120 return 0;
9121
9122 out:
9123 if (netif_msg_hw(tp))
9124 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9125 offset);
9126 tw32(offset, save_val);
9127 return -EIO;
9128 }
9129
9130 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9131 {
9132 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9133 int i;
9134 u32 j;
9135
9136 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9137 for (j = 0; j < len; j += 4) {
9138 u32 val;
9139
9140 tg3_write_mem(tp, offset + j, test_pattern[i]);
9141 tg3_read_mem(tp, offset + j, &val);
9142 if (val != test_pattern[i])
9143 return -EIO;
9144 }
9145 }
9146 return 0;
9147 }
9148
9149 static int tg3_test_memory(struct tg3 *tp)
9150 {
9151 static struct mem_entry {
9152 u32 offset;
9153 u32 len;
9154 } mem_tbl_570x[] = {
9155 { 0x00000000, 0x00b50},
9156 { 0x00002000, 0x1c000},
9157 { 0xffffffff, 0x00000}
9158 }, mem_tbl_5705[] = {
9159 { 0x00000100, 0x0000c},
9160 { 0x00000200, 0x00008},
9161 { 0x00004000, 0x00800},
9162 { 0x00006000, 0x01000},
9163 { 0x00008000, 0x02000},
9164 { 0x00010000, 0x0e000},
9165 { 0xffffffff, 0x00000}
9166 }, mem_tbl_5755[] = {
9167 { 0x00000200, 0x00008},
9168 { 0x00004000, 0x00800},
9169 { 0x00006000, 0x00800},
9170 { 0x00008000, 0x02000},
9171 { 0x00010000, 0x0c000},
9172 { 0xffffffff, 0x00000}
9173 }, mem_tbl_5906[] = {
9174 { 0x00000200, 0x00008},
9175 { 0x00004000, 0x00400},
9176 { 0x00006000, 0x00400},
9177 { 0x00008000, 0x01000},
9178 { 0x00010000, 0x01000},
9179 { 0xffffffff, 0x00000}
9180 };
9181 struct mem_entry *mem_tbl;
9182 int err = 0;
9183 int i;
9184
9185 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9186 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9187 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9188 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9189 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9190 mem_tbl = mem_tbl_5755;
9191 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9192 mem_tbl = mem_tbl_5906;
9193 else
9194 mem_tbl = mem_tbl_5705;
9195 } else
9196 mem_tbl = mem_tbl_570x;
9197
9198 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9199 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9200 mem_tbl[i].len)) != 0)
9201 break;
9202 }
9203
9204 return err;
9205 }
9206
9207 #define TG3_MAC_LOOPBACK 0
9208 #define TG3_PHY_LOOPBACK 1
9209
9210 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9211 {
9212 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9213 u32 desc_idx;
9214 struct sk_buff *skb, *rx_skb;
9215 u8 *tx_data;
9216 dma_addr_t map;
9217 int num_pkts, tx_len, rx_len, i, err;
9218 struct tg3_rx_buffer_desc *desc;
9219
9220 if (loopback_mode == TG3_MAC_LOOPBACK) {
9221 /* HW errata - mac loopback fails in some cases on 5780.
9222 * Normal traffic and PHY loopback are not affected by
9223 * errata.
9224 */
9225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9226 return 0;
9227
9228 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9229 MAC_MODE_PORT_INT_LPBACK;
9230 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9231 mac_mode |= MAC_MODE_LINK_POLARITY;
9232 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9233 mac_mode |= MAC_MODE_PORT_MODE_MII;
9234 else
9235 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9236 tw32(MAC_MODE, mac_mode);
9237 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9238 u32 val;
9239
9240 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9241 u32 phytest;
9242
9243 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
9244 u32 phy;
9245
9246 tg3_writephy(tp, MII_TG3_EPHY_TEST,
9247 phytest | MII_TG3_EPHY_SHADOW_EN);
9248 if (!tg3_readphy(tp, 0x1b, &phy))
9249 tg3_writephy(tp, 0x1b, phy & ~0x20);
9250 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
9251 }
9252 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9253 } else
9254 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9255
9256 tg3_phy_toggle_automdix(tp, 0);
9257
9258 tg3_writephy(tp, MII_BMCR, val);
9259 udelay(40);
9260
9261 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9262 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9263 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
9264 mac_mode |= MAC_MODE_PORT_MODE_MII;
9265 } else
9266 mac_mode |= MAC_MODE_PORT_MODE_GMII;
9267
9268 /* reset to prevent losing 1st rx packet intermittently */
9269 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9270 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9271 udelay(10);
9272 tw32_f(MAC_RX_MODE, tp->rx_mode);
9273 }
9274 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9275 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9276 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9277 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9278 mac_mode |= MAC_MODE_LINK_POLARITY;
9279 tg3_writephy(tp, MII_TG3_EXT_CTRL,
9280 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9281 }
9282 tw32(MAC_MODE, mac_mode);
9283 }
9284 else
9285 return -EINVAL;
9286
9287 err = -EIO;
9288
9289 tx_len = 1514;
9290 skb = netdev_alloc_skb(tp->dev, tx_len);
9291 if (!skb)
9292 return -ENOMEM;
9293
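/* Build the test frame: bytes 0-5 carry our own MAC address as the
 * destination, bytes 6-13 (source MAC plus ethertype) are zeroed, and
 * the payload from byte 14 onward is an incrementing pattern that is
 * verified on the receive side below.
 */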
9294 tx_data = skb_put(skb, tx_len);
9295 memcpy(tx_data, tp->dev->dev_addr, 6);
9296 memset(tx_data + 6, 0x0, 8);
9297
9298 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9299
9300 for (i = 14; i < tx_len; i++)
9301 tx_data[i] = (u8) (i & 0xff);
9302
9303 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9304
9305 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9306 HOSTCC_MODE_NOW);
9307
9308 udelay(10);
9309
9310 rx_start_idx = tp->hw_status->idx[0].rx_producer;
9311
9312 num_pkts = 0;
9313
9314 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9315
9316 tp->tx_prod++;
9317 num_pkts++;
9318
9319 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9320 tp->tx_prod);
9321 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9322
9323 udelay(10);
9324
9325 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
9326 for (i = 0; i < 25; i++) {
9327 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9328 HOSTCC_MODE_NOW);
9329
9330 udelay(10);
9331
9332 tx_idx = tp->hw_status->idx[0].tx_consumer;
9333 rx_idx = tp->hw_status->idx[0].rx_producer;
9334 if ((tx_idx == tp->tx_prod) &&
9335 (rx_idx == (rx_start_idx + num_pkts)))
9336 break;
9337 }
9338
9339 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9340 dev_kfree_skb(skb);
9341
9342 if (tx_idx != tp->tx_prod)
9343 goto out;
9344
9345 if (rx_idx != rx_start_idx + num_pkts)
9346 goto out;
9347
9348 desc = &tp->rx_rcb[rx_start_idx];
9349 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9350 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9351 if (opaque_key != RXD_OPAQUE_RING_STD)
9352 goto out;
9353
9354 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9355 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9356 goto out;
9357
9358 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9359 if (rx_len != tx_len)
9360 goto out;
9361
9362 rx_skb = tp->rx_std_buffers[desc_idx].skb;
9363
9364 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9365 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9366
9367 for (i = 14; i < tx_len; i++) {
9368 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9369 goto out;
9370 }
9371 err = 0;
9372
9373 /* tg3_free_rings will unmap and free the rx_skb */
9374 out:
9375 return err;
9376 }
9377
9378 #define TG3_MAC_LOOPBACK_FAILED 1
9379 #define TG3_PHY_LOOPBACK_FAILED 2
9380 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
9381 TG3_PHY_LOOPBACK_FAILED)
9382
9383 static int tg3_test_loopback(struct tg3 *tp)
9384 {
9385 int err = 0;
9386 u32 cpmuctrl = 0;
9387
9388 if (!netif_running(tp->dev))
9389 return TG3_LOOPBACK_FAILED;
9390
9391 err = tg3_reset_hw(tp, 1);
9392 if (err)
9393 return TG3_LOOPBACK_FAILED;
9394
9395 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
9396 int i;
9397 u32 status;
9398
9399 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9400
9401 /* Wait for up to 40 microseconds to acquire lock. */
9402 for (i = 0; i < 4; i++) {
9403 status = tr32(TG3_CPMU_MUTEX_GNT);
9404 if (status == CPMU_MUTEX_GNT_DRIVER)
9405 break;
9406 udelay(10);
9407 }
9408
9409 if (status != CPMU_MUTEX_GNT_DRIVER)
9410 return TG3_LOOPBACK_FAILED;
9411
9412 /* Turn off power management based on link speed. */
9413 cpmuctrl = tr32(TG3_CPMU_CTRL);
9414 tw32(TG3_CPMU_CTRL,
9415 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9416 CPMU_CTRL_LINK_AWARE_MODE));
9417 }
9418
9419 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9420 err |= TG3_MAC_LOOPBACK_FAILED;
9421
9422 if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
9423 tw32(TG3_CPMU_CTRL, cpmuctrl);
9424
9425 /* Release the mutex */
9426 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9427 }
9428
9429 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
9430 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9431 err |= TG3_PHY_LOOPBACK_FAILED;
9432 }
9433
9434 return err;
9435 }
9436
9437 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
9438 u64 *data)
9439 {
9440 struct tg3 *tp = netdev_priv(dev);
9441
9442 if (tp->link_config.phy_is_low_power)
9443 tg3_set_power_state(tp, PCI_D0);
9444
9445 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
9446
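/* The data[] slots map to the individual tests run below:
 * data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 * data[4] loopback (a bitmask of MAC/PHY failures), data[5] interrupt.
 */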
9447 if (tg3_test_nvram(tp) != 0) {
9448 etest->flags |= ETH_TEST_FL_FAILED;
9449 data[0] = 1;
9450 }
9451 if (tg3_test_link(tp) != 0) {
9452 etest->flags |= ETH_TEST_FL_FAILED;
9453 data[1] = 1;
9454 }
9455 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9456 int err, irq_sync = 0;
9457
9458 if (netif_running(dev)) {
9459 tg3_netif_stop(tp);
9460 irq_sync = 1;
9461 }
9462
9463 tg3_full_lock(tp, irq_sync);
9464
9465 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
9466 err = tg3_nvram_lock(tp);
9467 tg3_halt_cpu(tp, RX_CPU_BASE);
9468 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9469 tg3_halt_cpu(tp, TX_CPU_BASE);
9470 if (!err)
9471 tg3_nvram_unlock(tp);
9472
9473 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
9474 tg3_phy_reset(tp);
9475
9476 if (tg3_test_registers(tp) != 0) {
9477 etest->flags |= ETH_TEST_FL_FAILED;
9478 data[2] = 1;
9479 }
9480 if (tg3_test_memory(tp) != 0) {
9481 etest->flags |= ETH_TEST_FL_FAILED;
9482 data[3] = 1;
9483 }
9484 if ((data[4] = tg3_test_loopback(tp)) != 0)
9485 etest->flags |= ETH_TEST_FL_FAILED;
9486
9487 tg3_full_unlock(tp);
9488
9489 if (tg3_test_interrupt(tp) != 0) {
9490 etest->flags |= ETH_TEST_FL_FAILED;
9491 data[5] = 1;
9492 }
9493
9494 tg3_full_lock(tp, 0);
9495
9496 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9497 if (netif_running(dev)) {
9498 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9499 if (!tg3_restart_hw(tp, 1))
9500 tg3_netif_start(tp);
9501 }
9502
9503 tg3_full_unlock(tp);
9504 }
9505 if (tp->link_config.phy_is_low_power)
9506 tg3_set_power_state(tp, PCI_D3hot);
9507
9508 }
9509
9510 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9511 {
9512 struct mii_ioctl_data *data = if_mii(ifr);
9513 struct tg3 *tp = netdev_priv(dev);
9514 int err;
9515
9516 switch (cmd) {
9517 case SIOCGMIIPHY:
9518 data->phy_id = PHY_ADDR;
9519
9520 /* fallthru */
9521 case SIOCGMIIREG: {
9522 u32 mii_regval;
9523
9524 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9525 break; /* We have no PHY */
9526
9527 if (tp->link_config.phy_is_low_power)
9528 return -EAGAIN;
9529
9530 spin_lock_bh(&tp->lock);
9531 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
9532 spin_unlock_bh(&tp->lock);
9533
9534 data->val_out = mii_regval;
9535
9536 return err;
9537 }
9538
9539 case SIOCSMIIREG:
9540 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9541 break; /* We have no PHY */
9542
9543 if (!capable(CAP_NET_ADMIN))
9544 return -EPERM;
9545
9546 if (tp->link_config.phy_is_low_power)
9547 return -EAGAIN;
9548
9549 spin_lock_bh(&tp->lock);
9550 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
9551 spin_unlock_bh(&tp->lock);
9552
9553 return err;
9554
9555 default:
9556 /* do nothing */
9557 break;
9558 }
9559 return -EOPNOTSUPP;
9560 }
9561
9562 #if TG3_VLAN_TAG_USED
9563 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
9564 {
9565 struct tg3 *tp = netdev_priv(dev);
9566
9567 if (netif_running(dev))
9568 tg3_netif_stop(tp);
9569
9570 tg3_full_lock(tp, 0);
9571
9572 tp->vlgrp = grp;
9573
9574 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
9575 __tg3_set_rx_mode(dev);
9576
9577 if (netif_running(dev))
9578 tg3_netif_start(tp);
9579
9580 tg3_full_unlock(tp);
9581 }
9582 #endif
9583
9584 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9585 {
9586 struct tg3 *tp = netdev_priv(dev);
9587
9588 memcpy(ec, &tp->coal, sizeof(*ec));
9589 return 0;
9590 }
9591
9592 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
9593 {
9594 struct tg3 *tp = netdev_priv(dev);
9595 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
9596 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
9597
9598 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
9599 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
9600 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
9601 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
9602 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
9603 }
9604
9605 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
9606 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
9607 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
9608 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
9609 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
9610 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
9611 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
9612 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
9613 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
9614 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
9615 return -EINVAL;
9616
9617 /* No rx interrupts will be generated if both are zero */
9618 if ((ec->rx_coalesce_usecs == 0) &&
9619 (ec->rx_max_coalesced_frames == 0))
9620 return -EINVAL;
9621
9622 /* No tx interrupts will be generated if both are zero */
9623 if ((ec->tx_coalesce_usecs == 0) &&
9624 (ec->tx_max_coalesced_frames == 0))
9625 return -EINVAL;
9626
9627 /* Only copy relevant parameters, ignore all others. */
9628 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
9629 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
9630 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
9631 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
9632 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
9633 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
9634 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
9635 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
9636 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
9637
9638 if (netif_running(dev)) {
9639 tg3_full_lock(tp, 0);
9640 __tg3_set_coalesce(tp, &tp->coal);
9641 tg3_full_unlock(tp);
9642 }
9643 return 0;
9644 }
9645
9646 static const struct ethtool_ops tg3_ethtool_ops = {
9647 .get_settings = tg3_get_settings,
9648 .set_settings = tg3_set_settings,
9649 .get_drvinfo = tg3_get_drvinfo,
9650 .get_regs_len = tg3_get_regs_len,
9651 .get_regs = tg3_get_regs,
9652 .get_wol = tg3_get_wol,
9653 .set_wol = tg3_set_wol,
9654 .get_msglevel = tg3_get_msglevel,
9655 .set_msglevel = tg3_set_msglevel,
9656 .nway_reset = tg3_nway_reset,
9657 .get_link = ethtool_op_get_link,
9658 .get_eeprom_len = tg3_get_eeprom_len,
9659 .get_eeprom = tg3_get_eeprom,
9660 .set_eeprom = tg3_set_eeprom,
9661 .get_ringparam = tg3_get_ringparam,
9662 .set_ringparam = tg3_set_ringparam,
9663 .get_pauseparam = tg3_get_pauseparam,
9664 .set_pauseparam = tg3_set_pauseparam,
9665 .get_rx_csum = tg3_get_rx_csum,
9666 .set_rx_csum = tg3_set_rx_csum,
9667 .set_tx_csum = tg3_set_tx_csum,
9668 .set_sg = ethtool_op_set_sg,
9669 .set_tso = tg3_set_tso,
9670 .self_test = tg3_self_test,
9671 .get_strings = tg3_get_strings,
9672 .phys_id = tg3_phys_id,
9673 .get_ethtool_stats = tg3_get_ethtool_stats,
9674 .get_coalesce = tg3_get_coalesce,
9675 .set_coalesce = tg3_set_coalesce,
9676 .get_sset_count = tg3_get_sset_count,
9677 };
9678
9679 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
9680 {
9681 u32 cursize, val, magic;
9682
9683 tp->nvram_size = EEPROM_CHIP_SIZE;
9684
9685 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9686 return;
9687
9688 if ((magic != TG3_EEPROM_MAGIC) &&
9689 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
9690 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
9691 return;
9692
9693 /*
9694 * Size the chip by reading offsets at increasing powers of two.
9695 * When we encounter our validation signature, we know the addressing
9696 * has wrapped around, and thus have our chip size.
9697 */
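/* For example, on a 512-byte part the read at cursize == 0x200 wraps
 * back to offset 0 and returns the magic again, so nvram_size becomes
 * 0x200.
 */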
9698 cursize = 0x10;
9699
9700 while (cursize < tp->nvram_size) {
9701 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
9702 return;
9703
9704 if (val == magic)
9705 break;
9706
9707 cursize <<= 1;
9708 }
9709
9710 tp->nvram_size = cursize;
9711 }
9712
9713 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
9714 {
9715 u32 val;
9716
9717 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
9718 return;
9719
9720 /* Selfboot format */
9721 if (val != TG3_EEPROM_MAGIC) {
9722 tg3_get_eeprom_size(tp);
9723 return;
9724 }
9725
9726 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
9727 if (val != 0) {
9728 tp->nvram_size = (val >> 16) * 1024;
9729 return;
9730 }
9731 }
9732 tp->nvram_size = 0x80000;
9733 }
9734
9735 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9736 {
9737 u32 nvcfg1;
9738
9739 nvcfg1 = tr32(NVRAM_CFG1);
9740 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9741 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9742 }
9743 else {
9744 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9745 tw32(NVRAM_CFG1, nvcfg1);
9746 }
9747
9748 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9749 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9750 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9751 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9752 tp->nvram_jedecnum = JEDEC_ATMEL;
9753 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9754 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9755 break;
9756 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9757 tp->nvram_jedecnum = JEDEC_ATMEL;
9758 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9759 break;
9760 case FLASH_VENDOR_ATMEL_EEPROM:
9761 tp->nvram_jedecnum = JEDEC_ATMEL;
9762 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9763 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9764 break;
9765 case FLASH_VENDOR_ST:
9766 tp->nvram_jedecnum = JEDEC_ST;
9767 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9768 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9769 break;
9770 case FLASH_VENDOR_SAIFUN:
9771 tp->nvram_jedecnum = JEDEC_SAIFUN;
9772 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9773 break;
9774 case FLASH_VENDOR_SST_SMALL:
9775 case FLASH_VENDOR_SST_LARGE:
9776 tp->nvram_jedecnum = JEDEC_SST;
9777 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9778 break;
9779 }
9780 }
9781 else {
9782 tp->nvram_jedecnum = JEDEC_ATMEL;
9783 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9784 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9785 }
9786 }
9787
9788 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
9789 {
9790 u32 nvcfg1;
9791
9792 nvcfg1 = tr32(NVRAM_CFG1);
9793
9794 /* NVRAM protection for TPM */
9795 if (nvcfg1 & (1 << 27))
9796 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9797
9798 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9799 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
9800 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
9801 tp->nvram_jedecnum = JEDEC_ATMEL;
9802 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9803 break;
9804 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9805 tp->nvram_jedecnum = JEDEC_ATMEL;
9806 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9807 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9808 break;
9809 case FLASH_5752VENDOR_ST_M45PE10:
9810 case FLASH_5752VENDOR_ST_M45PE20:
9811 case FLASH_5752VENDOR_ST_M45PE40:
9812 tp->nvram_jedecnum = JEDEC_ST;
9813 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9814 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9815 break;
9816 }
9817
9818 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
9819 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
9820 case FLASH_5752PAGE_SIZE_256:
9821 tp->nvram_pagesize = 256;
9822 break;
9823 case FLASH_5752PAGE_SIZE_512:
9824 tp->nvram_pagesize = 512;
9825 break;
9826 case FLASH_5752PAGE_SIZE_1K:
9827 tp->nvram_pagesize = 1024;
9828 break;
9829 case FLASH_5752PAGE_SIZE_2K:
9830 tp->nvram_pagesize = 2048;
9831 break;
9832 case FLASH_5752PAGE_SIZE_4K:
9833 tp->nvram_pagesize = 4096;
9834 break;
9835 case FLASH_5752PAGE_SIZE_264:
9836 tp->nvram_pagesize = 264;
9837 break;
9838 }
9839 }
9840 else {
9841 /* For eeprom, set pagesize to maximum eeprom size */
9842 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9843
9844 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9845 tw32(NVRAM_CFG1, nvcfg1);
9846 }
9847 }
9848
9849 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
9850 {
9851 u32 nvcfg1, protect = 0;
9852
9853 nvcfg1 = tr32(NVRAM_CFG1);
9854
9855 /* NVRAM protection for TPM */
9856 if (nvcfg1 & (1 << 27)) {
9857 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9858 protect = 1;
9859 }
9860
9861 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9862 switch (nvcfg1) {
9863 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9864 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9865 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9866 case FLASH_5755VENDOR_ATMEL_FLASH_5:
9867 tp->nvram_jedecnum = JEDEC_ATMEL;
9868 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9869 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9870 tp->nvram_pagesize = 264;
9871 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
9872 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
9873 tp->nvram_size = (protect ? 0x3e200 : 0x80000);
9874 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
9875 tp->nvram_size = (protect ? 0x1f200 : 0x40000);
9876 else
9877 tp->nvram_size = (protect ? 0x1f200 : 0x20000);
9878 break;
9879 case FLASH_5752VENDOR_ST_M45PE10:
9880 case FLASH_5752VENDOR_ST_M45PE20:
9881 case FLASH_5752VENDOR_ST_M45PE40:
9882 tp->nvram_jedecnum = JEDEC_ST;
9883 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9884 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9885 tp->nvram_pagesize = 256;
9886 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
9887 tp->nvram_size = (protect ? 0x10000 : 0x20000);
9888 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
9889 tp->nvram_size = (protect ? 0x10000 : 0x40000);
9890 else
9891 tp->nvram_size = (protect ? 0x20000 : 0x80000);
9892 break;
9893 }
9894 }
9895
9896 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
9897 {
9898 u32 nvcfg1;
9899
9900 nvcfg1 = tr32(NVRAM_CFG1);
9901
9902 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
9903 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
9904 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
9905 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
9906 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
9907 tp->nvram_jedecnum = JEDEC_ATMEL;
9908 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9909 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9910
9911 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9912 tw32(NVRAM_CFG1, nvcfg1);
9913 break;
9914 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
9915 case FLASH_5755VENDOR_ATMEL_FLASH_1:
9916 case FLASH_5755VENDOR_ATMEL_FLASH_2:
9917 case FLASH_5755VENDOR_ATMEL_FLASH_3:
9918 tp->nvram_jedecnum = JEDEC_ATMEL;
9919 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9920 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9921 tp->nvram_pagesize = 264;
9922 break;
9923 case FLASH_5752VENDOR_ST_M45PE10:
9924 case FLASH_5752VENDOR_ST_M45PE20:
9925 case FLASH_5752VENDOR_ST_M45PE40:
9926 tp->nvram_jedecnum = JEDEC_ST;
9927 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9928 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9929 tp->nvram_pagesize = 256;
9930 break;
9931 }
9932 }
9933
9934 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
9935 {
9936 u32 nvcfg1, protect = 0;
9937
9938 nvcfg1 = tr32(NVRAM_CFG1);
9939
9940 /* NVRAM protection for TPM */
9941 if (nvcfg1 & (1 << 27)) {
9942 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
9943 protect = 1;
9944 }
9945
9946 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
9947 switch (nvcfg1) {
9948 case FLASH_5761VENDOR_ATMEL_ADB021D:
9949 case FLASH_5761VENDOR_ATMEL_ADB041D:
9950 case FLASH_5761VENDOR_ATMEL_ADB081D:
9951 case FLASH_5761VENDOR_ATMEL_ADB161D:
9952 case FLASH_5761VENDOR_ATMEL_MDB021D:
9953 case FLASH_5761VENDOR_ATMEL_MDB041D:
9954 case FLASH_5761VENDOR_ATMEL_MDB081D:
9955 case FLASH_5761VENDOR_ATMEL_MDB161D:
9956 tp->nvram_jedecnum = JEDEC_ATMEL;
9957 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9958 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9959 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
9960 tp->nvram_pagesize = 256;
9961 break;
9962 case FLASH_5761VENDOR_ST_A_M45PE20:
9963 case FLASH_5761VENDOR_ST_A_M45PE40:
9964 case FLASH_5761VENDOR_ST_A_M45PE80:
9965 case FLASH_5761VENDOR_ST_A_M45PE16:
9966 case FLASH_5761VENDOR_ST_M_M45PE20:
9967 case FLASH_5761VENDOR_ST_M_M45PE40:
9968 case FLASH_5761VENDOR_ST_M_M45PE80:
9969 case FLASH_5761VENDOR_ST_M_M45PE16:
9970 tp->nvram_jedecnum = JEDEC_ST;
9971 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9972 tp->tg3_flags2 |= TG3_FLG2_FLASH;
9973 tp->nvram_pagesize = 256;
9974 break;
9975 }
9976
9977 if (protect) {
9978 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
9979 } else {
9980 switch (nvcfg1) {
9981 case FLASH_5761VENDOR_ATMEL_ADB161D:
9982 case FLASH_5761VENDOR_ATMEL_MDB161D:
9983 case FLASH_5761VENDOR_ST_A_M45PE16:
9984 case FLASH_5761VENDOR_ST_M_M45PE16:
9985 tp->nvram_size = 0x100000;
9986 break;
9987 case FLASH_5761VENDOR_ATMEL_ADB081D:
9988 case FLASH_5761VENDOR_ATMEL_MDB081D:
9989 case FLASH_5761VENDOR_ST_A_M45PE80:
9990 case FLASH_5761VENDOR_ST_M_M45PE80:
9991 tp->nvram_size = 0x80000;
9992 break;
9993 case FLASH_5761VENDOR_ATMEL_ADB041D:
9994 case FLASH_5761VENDOR_ATMEL_MDB041D:
9995 case FLASH_5761VENDOR_ST_A_M45PE40:
9996 case FLASH_5761VENDOR_ST_M_M45PE40:
9997 tp->nvram_size = 0x40000;
9998 break;
9999 case FLASH_5761VENDOR_ATMEL_ADB021D:
10000 case FLASH_5761VENDOR_ATMEL_MDB021D:
10001 case FLASH_5761VENDOR_ST_A_M45PE20:
10002 case FLASH_5761VENDOR_ST_M_M45PE20:
10003 tp->nvram_size = 0x20000;
10004 break;
10005 }
10006 }
10007 }
10008
10009 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10010 {
10011 tp->nvram_jedecnum = JEDEC_ATMEL;
10012 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10013 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10014 }
10015
10016 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10017 static void __devinit tg3_nvram_init(struct tg3 *tp)
10018 {
10019 tw32_f(GRC_EEPROM_ADDR,
10020 (EEPROM_ADDR_FSM_RESET |
10021 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10022 EEPROM_ADDR_CLKPERD_SHIFT)));
10023
10024 msleep(1);
10025
10026 /* Enable seeprom accesses. */
10027 tw32_f(GRC_LOCAL_CTRL,
10028 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10029 udelay(100);
10030
10031 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10032 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10033 tp->tg3_flags |= TG3_FLAG_NVRAM;
10034
10035 if (tg3_nvram_lock(tp)) {
10036 printk(KERN_WARNING PFX "%s: Cannot get nvram lock, "
10037 "tg3_nvram_init failed.\n", tp->dev->name);
10038 return;
10039 }
10040 tg3_enable_nvram_access(tp);
10041
10042 tp->nvram_size = 0;
10043
10044 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10045 tg3_get_5752_nvram_info(tp);
10046 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10047 tg3_get_5755_nvram_info(tp);
10048 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10049 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
10050 tg3_get_5787_nvram_info(tp);
10051 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10052 tg3_get_5761_nvram_info(tp);
10053 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10054 tg3_get_5906_nvram_info(tp);
10055 else
10056 tg3_get_nvram_info(tp);
10057
10058 if (tp->nvram_size == 0)
10059 tg3_get_nvram_size(tp);
10060
10061 tg3_disable_nvram_access(tp);
10062 tg3_nvram_unlock(tp);
10063
10064 } else {
10065 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10066
10067 tg3_get_eeprom_size(tp);
10068 }
10069 }
10070
10071 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10072 u32 offset, u32 *val)
10073 {
10074 u32 tmp;
10075 int i;
10076
10077 if (offset > EEPROM_ADDR_ADDR_MASK ||
10078 (offset % 4) != 0)
10079 return -EINVAL;
10080
10081 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10082 EEPROM_ADDR_DEVID_MASK |
10083 EEPROM_ADDR_READ);
10084 tw32(GRC_EEPROM_ADDR,
10085 tmp |
10086 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10087 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10088 EEPROM_ADDR_ADDR_MASK) |
10089 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10090
10091 for (i = 0; i < 1000; i++) {
10092 tmp = tr32(GRC_EEPROM_ADDR);
10093
10094 if (tmp & EEPROM_ADDR_COMPLETE)
10095 break;
10096 msleep(1);
10097 }
10098 if (!(tmp & EEPROM_ADDR_COMPLETE))
10099 return -EBUSY;
10100
10101 *val = tr32(GRC_EEPROM_DATA);
10102 return 0;
10103 }
10104
10105 #define NVRAM_CMD_TIMEOUT 10000
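/* 10000 polls at 10 usec apiece: allow up to roughly 100 ms for the
 * controller to assert NVRAM_CMD_DONE.
 */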
10106
10107 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10108 {
10109 int i;
10110
10111 tw32(NVRAM_CMD, nvram_cmd);
10112 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10113 udelay(10);
10114 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10115 udelay(10);
10116 break;
10117 }
10118 }
10119 if (i == NVRAM_CMD_TIMEOUT) {
10120 return -EBUSY;
10121 }
10122 return 0;
10123 }
10124
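/* Unbuffered Atmel AT45DB parts use "page << ATMEL_AT45DB0X1B_PAGE_POS
 * | byte offset" addressing rather than a flat linear address, so we
 * translate before programming the NVRAM_ADDR register.  Assuming the
 * page position is 9 bits (as the 264-byte page geometry suggests),
 * linear address 1000 with a 264-byte page becomes page 3, offset 208,
 * i.e. (3 << 9) + 208 = 1744.  tg3_nvram_logical_addr() below performs
 * the inverse mapping.
 */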
10125 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10126 {
10127 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10128 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10129 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10130 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10131 (tp->nvram_jedecnum == JEDEC_ATMEL))
10132
10133 addr = ((addr / tp->nvram_pagesize) <<
10134 ATMEL_AT45DB0X1B_PAGE_POS) +
10135 (addr % tp->nvram_pagesize);
10136
10137 return addr;
10138 }
10139
10140 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10141 {
10142 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10143 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10144 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10145 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10146 (tp->nvram_jedecnum == JEDEC_ATMEL))
10147
10148 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10149 tp->nvram_pagesize) +
10150 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
10151
10152 return addr;
10153 }
10154
10155 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
10156 {
10157 int ret;
10158
10159 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
10160 return tg3_nvram_read_using_eeprom(tp, offset, val);
10161
10162 offset = tg3_nvram_phys_addr(tp, offset);
10163
10164 if (offset > NVRAM_ADDR_MSK)
10165 return -EINVAL;
10166
10167 ret = tg3_nvram_lock(tp);
10168 if (ret)
10169 return ret;
10170
10171 tg3_enable_nvram_access(tp);
10172
10173 tw32(NVRAM_ADDR, offset);
10174 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
10175 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
10176
10177 if (ret == 0)
10178 *val = swab32(tr32(NVRAM_RDDATA));
10179
10180 tg3_disable_nvram_access(tp);
10181
10182 tg3_nvram_unlock(tp);
10183
10184 return ret;
10185 }
10186
10187 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
10188 {
10189 u32 v;
10190 int res = tg3_nvram_read(tp, offset, &v);
10191 if (!res)
10192 *val = cpu_to_le32(v);
10193 return res;
10194 }
10195
10196 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
10197 {
10198 int err;
10199 u32 tmp;
10200
10201 err = tg3_nvram_read(tp, offset, &tmp);
10202 *val = swab32(tmp);
10203 return err;
10204 }
10205
10206 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
10207 u32 offset, u32 len, u8 *buf)
10208 {
10209 int i, j, rc = 0;
10210 u32 val;
10211
10212 for (i = 0; i < len; i += 4) {
10213 u32 addr;
10214 __le32 data;
10215
10216 addr = offset + i;
10217
10218 memcpy(&data, buf + i, 4);
10219
10220 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
10221
10222 val = tr32(GRC_EEPROM_ADDR);
10223 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
10224
10225 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
10226 EEPROM_ADDR_READ);
10227 tw32(GRC_EEPROM_ADDR, val |
10228 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10229 (addr & EEPROM_ADDR_ADDR_MASK) |
10230 EEPROM_ADDR_START |
10231 EEPROM_ADDR_WRITE);
10232
10233 for (j = 0; j < 1000; j++) {
10234 val = tr32(GRC_EEPROM_ADDR);
10235
10236 if (val & EEPROM_ADDR_COMPLETE)
10237 break;
10238 msleep(1);
10239 }
10240 if (!(val & EEPROM_ADDR_COMPLETE)) {
10241 rc = -EBUSY;
10242 break;
10243 }
10244 }
10245
10246 return rc;
10247 }
10248
10249 /* offset and length are dword aligned */
10250 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10251 u8 *buf)
10252 {
10253 int ret = 0;
10254 u32 pagesize = tp->nvram_pagesize;
10255 u32 pagemask = pagesize - 1;
10256 u32 nvram_cmd;
10257 u8 *tmp;
10258
10259 tmp = kmalloc(pagesize, GFP_KERNEL);
10260 if (tmp == NULL)
10261 return -ENOMEM;
10262
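/* Read-modify-write one flash page per loop pass: fetch the whole
 * page into tmp, merge in the caller's data, then issue write-enable,
 * page erase, another write-enable, and finally program the page one
 * word at a time with FIRST/LAST framing.
 */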
10263 while (len) {
10264 int j;
10265 u32 phy_addr, page_off, size;
10266
10267 phy_addr = offset & ~pagemask;
10268
10269 for (j = 0; j < pagesize; j += 4) {
10270 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
10271 (__le32 *) (tmp + j))))
10272 break;
10273 }
10274 if (ret)
10275 break;
10276
10277 page_off = offset & pagemask;
10278 size = pagesize;
10279 if (len < size)
10280 size = len;
10281
10282 len -= size;
10283
10284 memcpy(tmp + page_off, buf, size);
10285
10286 offset = offset + (pagesize - page_off);
10287
10288 tg3_enable_nvram_access(tp);
10289
10290 /*
10291 * Before we can erase the flash page, we need
10292 * to issue a special "write enable" command.
10293 */
10294 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10295
10296 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10297 break;
10298
10299 /* Erase the target page */
10300 tw32(NVRAM_ADDR, phy_addr);
10301
10302 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10303 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10304
10305 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10306 break;
10307
10308 /* Issue another write enable to start the write. */
10309 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10310
10311 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10312 break;
10313
10314 for (j = 0; j < pagesize; j += 4) {
10315 __be32 data;
10316
10317 data = *((__be32 *) (tmp + j));
10318 /* swab32(le32_to_cpu(data)), actually */
10319 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10320
10321 tw32(NVRAM_ADDR, phy_addr + j);
10322
10323 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10324 NVRAM_CMD_WR;
10325
10326 if (j == 0)
10327 nvram_cmd |= NVRAM_CMD_FIRST;
10328 else if (j == (pagesize - 4))
10329 nvram_cmd |= NVRAM_CMD_LAST;
10330
10331 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10332 break;
10333 }
10334 if (ret)
10335 break;
10336 }
10337
10338 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10339 tg3_nvram_exec_cmd(tp, nvram_cmd);
10340
10341 kfree(tmp);
10342
10343 return ret;
10344 }
10345
10346 /* offset and length are dword aligned */
10347 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
10348 u8 *buf)
10349 {
10350 int i, ret = 0;
10351
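/* Buffered flash and eeprom parts accept word-at-a-time programming,
 * so no page erase is needed; each page (and the transfer as a whole)
 * is only framed with NVRAM_CMD_FIRST/NVRAM_CMD_LAST, plus a write
 * enable first on the ST parts that appear to require it.
 */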
10352 for (i = 0; i < len; i += 4, offset += 4) {
10353 u32 page_off, phy_addr, nvram_cmd;
10354 __be32 data;
10355
10356 memcpy(&data, buf + i, 4);
10357 tw32(NVRAM_WRDATA, be32_to_cpu(data));
10358
10359 page_off = offset % tp->nvram_pagesize;
10360
10361 phy_addr = tg3_nvram_phys_addr(tp, offset);
10362
10363 tw32(NVRAM_ADDR, phy_addr);
10364
10365 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
10366
10367 if ((page_off == 0) || (i == 0))
10368 nvram_cmd |= NVRAM_CMD_FIRST;
10369 if (page_off == (tp->nvram_pagesize - 4))
10370 nvram_cmd |= NVRAM_CMD_LAST;
10371
10372 if (i == (len - 4))
10373 nvram_cmd |= NVRAM_CMD_LAST;
10374
10375 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
10376 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
10377 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
10378 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
10379 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
10380 (tp->nvram_jedecnum == JEDEC_ST) &&
10381 (nvram_cmd & NVRAM_CMD_FIRST)) {
10382
10383 if ((ret = tg3_nvram_exec_cmd(tp,
10384 NVRAM_CMD_WREN | NVRAM_CMD_GO |
10385 NVRAM_CMD_DONE)))
10386
10387 break;
10388 }
10389 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10390 /* We always do complete word writes to eeprom. */
10391 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
10392 }
10393
10394 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10395 break;
10396 }
10397 return ret;
10398 }
10399
10400 /* offset and length are dword aligned */
10401 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10402 {
10403 int ret;
10404
10405 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10406 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10407 ~GRC_LCLCTRL_GPIO_OUTPUT1);
10408 udelay(40);
10409 }
10410
10411 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10412 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10413 }
10414 else {
10415 u32 grc_mode;
10416
10417 ret = tg3_nvram_lock(tp);
10418 if (ret)
10419 return ret;
10420
10421 tg3_enable_nvram_access(tp);
10422 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10423 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10424 tw32(NVRAM_WRITE1, 0x406);
10425
10426 grc_mode = tr32(GRC_MODE);
10427 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10428
10429 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10430 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10431
10432 ret = tg3_nvram_write_block_buffered(tp, offset, len,
10433 buf);
10434 }
10435 else {
10436 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10437 buf);
10438 }
10439
10440 grc_mode = tr32(GRC_MODE);
10441 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10442
10443 tg3_disable_nvram_access(tp);
10444 tg3_nvram_unlock(tp);
10445 }
10446
10447 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10448 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10449 udelay(40);
10450 }
10451
10452 return ret;
10453 }
10454
10455 struct subsys_tbl_ent {
10456 u16 subsys_vendor, subsys_devid;
10457 u32 phy_id;
10458 };
10459
10460 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
10461 /* Broadcom boards. */
10462 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
10463 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
10464 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
10465 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
10466 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
10467 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
10468 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
10469 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
10470 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
10471 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
10472 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
10473
10474 /* 3com boards. */
10475 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
10476 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
10477 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
10478 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
10479 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
10480
10481 /* DELL boards. */
10482 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
10483 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
10484 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
10485 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
10486
10487 /* Compaq boards. */
10488 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
10489 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
10490 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
10491 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
10492 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
10493
10494 /* IBM boards. */
10495 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
10496 };
10497
10498 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
10499 {
10500 int i;
10501
10502 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
10503 if ((subsys_id_to_phy_id[i].subsys_vendor ==
10504 tp->pdev->subsystem_vendor) &&
10505 (subsys_id_to_phy_id[i].subsys_devid ==
10506 tp->pdev->subsystem_device))
10507 return &subsys_id_to_phy_id[i];
10508 }
10509 return NULL;
10510 }
10511
10512 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
10513 {
10514 u32 val;
10515 u16 pmcsr;
10516
10517 /* On some early chips the SRAM cannot be accessed in D3hot state,
10518 * so we need to make sure we're in D0.
10519 */
10520 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
10521 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10522 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
10523 msleep(1);
10524
10525 /* Make sure register accesses (indirect or otherwise)
10526 * will function correctly.
10527 */
10528 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10529 tp->misc_host_ctrl);
10530
10531 /* The memory arbiter has to be enabled in order for SRAM accesses
10532 * to succeed. Normally on powerup the tg3 chip firmware will make
10533 * sure it is enabled, but other entities such as system netboot
10534 * code might disable it.
10535 */
10536 val = tr32(MEMARB_MODE);
10537 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
10538
10539 tp->phy_id = PHY_ID_INVALID;
10540 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10541
10542 /* Assume an onboard device and WOL capable by default. */
10543 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
10544
10545 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10546 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
10547 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10548 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10549 }
10550 val = tr32(VCPU_CFGSHDW);
10551 if (val & VCPU_CFGSHDW_ASPM_DBNC)
10552 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10553 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
10554 (val & VCPU_CFGSHDW_WOL_MAGPKT))
10555 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10556 return;
10557 }
10558
10559 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
10560 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
10561 u32 nic_cfg, led_cfg;
10562 u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
10563 int eeprom_phy_serdes = 0;
10564
10565 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
10566 tp->nic_sram_data_cfg = nic_cfg;
10567
10568 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
10569 ver >>= NIC_SRAM_DATA_VER_SHIFT;
10570 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
10571 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
10572 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
10573 (ver > 0) && (ver < 0x100))
10574 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
10575
10576 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
10577 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
10578 eeprom_phy_serdes = 1;
10579
10580 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
10581 if (nic_phy_id != 0) {
10582 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
10583 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
10584
10585 eeprom_phy_id = (id1 >> 16) << 10;
10586 eeprom_phy_id |= (id2 & 0xfc00) << 16;
10587 eeprom_phy_id |= (id2 & 0x03ff) << 0;
10588 } else
10589 eeprom_phy_id = 0;
10590
10591 tp->phy_id = eeprom_phy_id;
10592 if (eeprom_phy_serdes) {
10593 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
10594 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
10595 else
10596 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10597 }
10598
10599 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10600 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
10601 SHASTA_EXT_LED_MODE_MASK);
10602 else
10603 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
10604
10605 switch (led_cfg) {
10606 default:
10607 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
10608 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10609 break;
10610
10611 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
10612 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10613 break;
10614
10615 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
10616 tp->led_ctrl = LED_CTRL_MODE_MAC;
10617
/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
 * as happens with some older 5700/5701 bootcode.
 */
10621 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10622 ASIC_REV_5700 ||
10623 GET_ASIC_REV(tp->pci_chip_rev_id) ==
10624 ASIC_REV_5701)
10625 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
10626
10627 break;
10628
10629 case SHASTA_EXT_LED_SHARED:
10630 tp->led_ctrl = LED_CTRL_MODE_SHARED;
10631 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
10632 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
10633 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10634 LED_CTRL_MODE_PHY_2);
10635 break;
10636
10637 case SHASTA_EXT_LED_MAC:
10638 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
10639 break;
10640
10641 case SHASTA_EXT_LED_COMBO:
10642 tp->led_ctrl = LED_CTRL_MODE_COMBO;
10643 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
10644 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
10645 LED_CTRL_MODE_PHY_2);
10646 break;
10647
}
10649
10650 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10651 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
10652 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
10653 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
10654
10655 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
10656 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
10657 tp->led_ctrl = LED_CTRL_MODE_MAC;
10658
10659 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
10660 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
10661 if ((tp->pdev->subsystem_vendor ==
10662 PCI_VENDOR_ID_ARIMA) &&
10663 (tp->pdev->subsystem_device == 0x205a ||
10664 tp->pdev->subsystem_device == 0x2063))
10665 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10666 } else {
10667 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
10668 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
10669 }
10670
10671 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
10672 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
10673 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10674 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
10675 }
10676 if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
10677 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
10678 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
10679 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
10680 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
10681
10682 if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
10683 nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
10684 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
10685
10686 if (cfg2 & (1 << 17))
10687 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
10688
/* SerDes signal pre-emphasis in register 0x590 is set by
 * bootcode if bit 18 is set.
 */
10691 if (cfg2 & (1 << 18))
10692 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
10693
10694 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
10695 u32 cfg3;
10696
10697 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
10698 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
10699 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
10700 }
10701 }
10702 }
10703
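/* Determine the PHY ID for this device: preferably from the PHY's own
 * MII ID registers, otherwise from the value tg3_get_eeprom_hw_cfg()
 * recorded, and as a last resort from the subsystem-ID table above.
 */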
10704 static int __devinit tg3_phy_probe(struct tg3 *tp)
10705 {
10706 u32 hw_phy_id_1, hw_phy_id_2;
10707 u32 hw_phy_id, hw_phy_id_masked;
10708 int err;
10709
10710 /* Reading the PHY ID register can conflict with ASF
 * firmware access to the PHY hardware.
10712 */
10713 err = 0;
10714 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10715 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
10716 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
10717 } else {
/* Now read the physical PHY_ID from the chip and verify
 * that it is sane.  If it doesn't look good, we fall back
 * to the PHY ID recorded in the eeprom area and, failing
 * that, the hard-coded subsystem-ID table above.
 */
10723 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
10724 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
10725
10726 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
10727 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
10728 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
10729
10730 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
10731 }
10732
10733 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
10734 tp->phy_id = hw_phy_id;
10735 if (hw_phy_id_masked == PHY_ID_BCM8002)
10736 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10737 else
10738 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
10739 } else {
10740 if (tp->phy_id != PHY_ID_INVALID) {
10741 /* Do nothing, phy ID already set up in
10742 * tg3_get_eeprom_hw_cfg().
10743 */
10744 } else {
10745 struct subsys_tbl_ent *p;
10746
10747 /* No eeprom signature? Try the hardcoded
10748 * subsys device table.
10749 */
10750 p = lookup_by_subsys(tp);
10751 if (!p)
10752 return -ENODEV;
10753
10754 tp->phy_id = p->phy_id;
10755 if (!tp->phy_id ||
10756 tp->phy_id == PHY_ID_BCM8002)
10757 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
10758 }
10759 }
10760
10761 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
10762 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
10763 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
10764 u32 bmsr, adv_reg, tg3_ctrl, mask;
10765
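/* MII_BMSR latches link-down events, so read it twice;
 * the second read reflects the current link state.
 */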
10766 tg3_readphy(tp, MII_BMSR, &bmsr);
10767 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
10768 (bmsr & BMSR_LSTATUS))
10769 goto skip_phy_reset;
10770
10771 err = tg3_phy_reset(tp);
10772 if (err)
10773 return err;
10774
10775 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
10776 ADVERTISE_100HALF | ADVERTISE_100FULL |
10777 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
10778 tg3_ctrl = 0;
10779 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
10780 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
10781 MII_TG3_CTRL_ADV_1000_FULL);
10782 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10783 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
10784 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
10785 MII_TG3_CTRL_ENABLE_AS_MASTER);
10786 }
10787
10788 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
10789 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
10790 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
10791 if (!tg3_copper_is_advertising_all(tp, mask)) {
10792 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10793
10794 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10795 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10796
10797 tg3_writephy(tp, MII_BMCR,
10798 BMCR_ANENABLE | BMCR_ANRESTART);
10799 }
10800 tg3_phy_set_wirespeed(tp);
10801
10802 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
10803 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
10804 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
10805 }
10806
10807 skip_phy_reset:
10808 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
10809 err = tg3_init_5401phy_dsp(tp);
10810 if (err)
10811 return err;
10812 }
10813
10814 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
10815 err = tg3_init_5401phy_dsp(tp);
10816 }
10817
10818 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10819 tp->link_config.advertising =
10820 (ADVERTISED_1000baseT_Half |
10821 ADVERTISED_1000baseT_Full |
10822 ADVERTISED_Autoneg |
10823 ADVERTISED_FIBRE);
10824 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10825 tp->link_config.advertising &=
10826 ~(ADVERTISED_1000baseT_Half |
10827 ADVERTISED_1000baseT_Full);
10828
10829 return err;
10830 }
10831
10832 static void __devinit tg3_read_partno(struct tg3 *tp)
10833 {
10834 unsigned char vpd_data[256];
10835 unsigned int i;
10836 u32 magic;
10837
10838 if (tg3_nvram_read_swab(tp, 0x0, &magic))
10839 goto out_not_found;
10840
10841 if (magic == TG3_EEPROM_MAGIC) {
10842 for (i = 0; i < 256; i += 4) {
10843 u32 tmp;
10844
10845 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
10846 goto out_not_found;
10847
10848 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
10849 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
10850 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
10851 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
10852 }
10853 } else {
10854 int vpd_cap;
10855
10856 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
10857 for (i = 0; i < 256; i += 4) {
10858 u32 tmp, j = 0;
10859 __le32 v;
10860 u16 tmp16;
10861
10862 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
10863 i);
10864 while (j++ < 100) {
10865 pci_read_config_word(tp->pdev, vpd_cap +
10866 PCI_VPD_ADDR, &tmp16);
10867 if (tmp16 & 0x8000)
10868 break;
10869 msleep(1);
10870 }
10871 if (!(tmp16 & 0x8000))
10872 goto out_not_found;
10873
10874 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
10875 &tmp);
10876 v = cpu_to_le32(tmp);
10877 memcpy(&vpd_data[i], &v, 4);
10878 }
10879 }
10880
10881 /* Now parse and find the part number. */
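/* VPD is a list of large-resource blocks: tag 0x82 holds the
 * identifier string, 0x91 the read-write data, and 0x90 the
 * read-only data that carries the 'PN' (part number) keyword
 * we are after.  Skip everything except the 0x90 block.
 */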
10882 for (i = 0; i < 254; ) {
10883 unsigned char val = vpd_data[i];
10884 unsigned int block_end;
10885
10886 if (val == 0x82 || val == 0x91) {
10887 i = (i + 3 +
10888 (vpd_data[i + 1] +
10889 (vpd_data[i + 2] << 8)));
10890 continue;
10891 }
10892
10893 if (val != 0x90)
10894 goto out_not_found;
10895
10896 block_end = (i + 3 +
10897 (vpd_data[i + 1] +
10898 (vpd_data[i + 2] << 8)));
10899 i += 3;
10900
10901 if (block_end > 256)
10902 goto out_not_found;
10903
10904 while (i < (block_end - 2)) {
10905 if (vpd_data[i + 0] == 'P' &&
10906 vpd_data[i + 1] == 'N') {
10907 int partno_len = vpd_data[i + 2];
10908
10909 i += 3;
10910 if (partno_len > 24 || (partno_len + i) > 256)
10911 goto out_not_found;
10912
10913 memcpy(tp->board_part_number,
10914 &vpd_data[i], partno_len);
10915
10916 /* Success. */
10917 return;
10918 }
10919 i += 3 + vpd_data[i + 2];
10920 }
10921
10922 /* Part number not found. */
10923 goto out_not_found;
10924 }
10925
10926 out_not_found:
10927 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10928 strcpy(tp->board_part_number, "BCM95906");
10929 else
10930 strcpy(tp->board_part_number, "none");
10931 }
10932
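/* Sanity-check the firmware image header at @offset: the first word
 * must carry the expected 0x0c000000 signature bits and the second
 * word must be zero.
 */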
10933 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
10934 {
10935 u32 val;
10936
10937 if (tg3_nvram_read_swab(tp, offset, &val) ||
10938 (val & 0xfc000000) != 0x0c000000 ||
10939 tg3_nvram_read_swab(tp, offset + 4, &val) ||
10940 val != 0)
10941 return 0;
10942
10943 return 1;
10944 }
10945
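/* Assemble tp->fw_ver: the version string stored with the bootcode
 * image in NVRAM, followed by ", <ASF version>" when ASF firmware is
 * present and the APE is not managing the device.
 */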
10946 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10947 {
10948 u32 val, offset, start;
10949 u32 ver_offset;
10950 int i, bcnt;
10951
10952 if (tg3_nvram_read_swab(tp, 0, &val))
10953 return;
10954
10955 if (val != TG3_EEPROM_MAGIC)
10956 return;
10957
10958 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10959 tg3_nvram_read_swab(tp, 0x4, &start))
10960 return;
10961
10962 offset = tg3_nvram_logical_addr(tp, offset);
10963
10964 if (!tg3_fw_img_is_valid(tp, offset) ||
10965 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10966 return;
10967
10968 offset = offset + ver_offset - start;
10969 for (i = 0; i < 16; i += 4) {
10970 __le32 v;
10971 if (tg3_nvram_read_le(tp, offset + i, &v))
10972 return;
10973
10974 memcpy(tp->fw_ver + i, &v, 4);
10975 }
10976
10977 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
10978 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
10979 return;
10980
10981 for (offset = TG3_NVM_DIR_START;
10982 offset < TG3_NVM_DIR_END;
10983 offset += TG3_NVM_DIRENT_SIZE) {
10984 if (tg3_nvram_read_swab(tp, offset, &val))
10985 return;
10986
10987 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
10988 break;
10989 }
10990
10991 if (offset == TG3_NVM_DIR_END)
10992 return;
10993
10994 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10995 start = 0x08000000;
10996 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
10997 return;
10998
10999 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11000 !tg3_fw_img_is_valid(tp, offset) ||
11001 tg3_nvram_read_swab(tp, offset + 8, &val))
11002 return;
11003
11004 offset += val - start;
11005
11006 bcnt = strlen(tp->fw_ver);
11007
11008 tp->fw_ver[bcnt++] = ',';
11009 tp->fw_ver[bcnt++] = ' ';
11010
11011 for (i = 0; i < 4; i++) {
11012 __le32 v;
11013 if (tg3_nvram_read_le(tp, offset, &v))
11014 return;
11015
11016 offset += sizeof(v);
11017
11018 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11019 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11020 break;
11021 }
11022
11023 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
11024 bcnt += sizeof(v);
11025 }
11026
11027 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11028 }
11029
11030 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11031
11032 static int __devinit tg3_get_invariants(struct tg3 *tp)
11033 {
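/* Host bridges known to reorder posted writes to the tg3 mailbox
 * registers; when one is present (and we are not on PCI Express)
 * every mailbox write is read back to force ordering -- see the
 * TG3_FLAG_MBOX_WRITE_REORDER handling below.
 */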
11034 static struct pci_device_id write_reorder_chipsets[] = {
11035 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11036 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11037 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11038 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11039 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11040 PCI_DEVICE_ID_VIA_8385_0) },
11041 { },
11042 };
11043 u32 misc_ctrl_reg;
11044 u32 cacheline_sz_reg;
11045 u32 pci_state_reg, grc_misc_cfg;
11046 u32 val;
11047 u16 pci_cmd;
11048 int err, pcie_cap;
11049
11050 /* Force memory write invalidate off. If we leave it on,
11051 * then on 5700_BX chips we have to enable a workaround.
11052 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
 * to match the cacheline size.  The Broadcom driver has this
 * workaround but turns MWI off all the time, so it is never
 * used.  This seems to suggest that the workaround is insufficient.
11056 */
11057 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11058 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11059 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11060
11061 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11062 * has the register indirect write enable bit set before
11063 * we try to access any of the MMIO registers. It is also
 * critical that the PCI-X hw workaround situation is decided
 * before that.
11066 */
11067 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11068 &misc_ctrl_reg);
11069
11070 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11071 MISC_HOST_CTRL_CHIPREV_SHIFT);
11072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11073 u32 prod_id_asic_rev;
11074
11075 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11076 &prod_id_asic_rev);
11077 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11078 }
11079
11080 /* Wrong chip ID in 5752 A0. This code can be removed later
11081 * as A0 is not in production.
11082 */
11083 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11084 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11085
11086 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11087 * we need to disable memory and use config. cycles
11088 * only to access all registers. The 5702/03 chips
11089 * can mistakenly decode the special cycles from the
11090 * ICH chipsets as memory write cycles, causing corruption
11091 * of register and memory space. Only certain ICH bridges
11092 * will drive special cycles with non-zero data during the
11093 * address phase which can fall within the 5703's address
11094 * range. This is not an ICH bug as the PCI spec allows
11095 * non-zero address during special cycles. However, only
11096 * these ICH bridges are known to drive non-zero addresses
11097 * during special cycles.
11098 *
11099 * Since special cycles do not cross PCI bridges, we only
11100 * enable this workaround if the 5703 is on the secondary
11101 * bus of these ICH bridges.
11102 */
11103 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11104 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11105 static struct tg3_dev_id {
11106 u32 vendor;
11107 u32 device;
11108 u32 rev;
11109 } ich_chipsets[] = {
11110 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11111 PCI_ANY_ID },
11112 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11113 PCI_ANY_ID },
11114 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11115 0xa },
11116 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11117 PCI_ANY_ID },
11118 { },
11119 };
11120 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11121 struct pci_dev *bridge = NULL;
11122
11123 while (pci_id->vendor != 0) {
11124 bridge = pci_get_device(pci_id->vendor, pci_id->device,
11125 bridge);
11126 if (!bridge) {
11127 pci_id++;
11128 continue;
11129 }
11130 if (pci_id->rev != PCI_ANY_ID) {
11131 if (bridge->revision > pci_id->rev)
11132 continue;
11133 }
11134 if (bridge->subordinate &&
11135 (bridge->subordinate->number ==
11136 tp->pdev->bus->number)) {
11137
11138 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11139 pci_dev_put(bridge);
11140 break;
11141 }
11142 }
11143 }
11144
11145 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
 * DMA addresses > 40-bit.  This bridge may have additional
 * 57xx devices behind it, as in some 4-port NIC designs.
11148 * Any tg3 device found behind the bridge will also need the 40-bit
11149 * DMA workaround.
11150 */
11151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11153 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11154 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11155 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
} else {
11158 struct pci_dev *bridge = NULL;
11159
11160 do {
11161 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11162 PCI_DEVICE_ID_SERVERWORKS_EPB,
11163 bridge);
11164 if (bridge && bridge->subordinate &&
11165 (bridge->subordinate->number <=
11166 tp->pdev->bus->number) &&
11167 (bridge->subordinate->subordinate >=
11168 tp->pdev->bus->number)) {
11169 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11170 pci_dev_put(bridge);
11171 break;
11172 }
11173 } while (bridge);
11174 }
11175
11176 /* Initialize misc host control in PCI block. */
11177 tp->misc_host_ctrl |= (misc_ctrl_reg &
11178 MISC_HOST_CTRL_CHIPREV);
11179 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11180 tp->misc_host_ctrl);
11181
11182 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11183 &cacheline_sz_reg);
11184
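/* TG3PCI_CACHELINESZ packs four standard PCI config bytes into
 * one dword: cacheline size, latency timer, header type and BIST.
 */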
11185 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
11186 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
11187 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
11188 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
11189
11190 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11191 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11192 tp->pdev_peer = tg3_find_peer(tp);
11193
11194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11195 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11196 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11197 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11198 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11199 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11200 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11201 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11202 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11203
11204 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11205 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11206 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11207
11208 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11209 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
11210 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
11211 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
11212 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
11213 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
11214 tp->pdev_peer == tp->pdev))
11215 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
11216
11217 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11219 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11222 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
11223 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
11224 } else {
11225 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
11226 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11227 ASIC_REV_5750 &&
11228 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
11229 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
11230 }
11231 }
11232
11233 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
11234 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
11235 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11236 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
11237 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
11238 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
11239 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
11240 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11241 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
11242
11243 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
11244 if (pcie_cap != 0) {
11245 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
11246
11247 pcie_set_readrq(tp->pdev, 4096);
11248
11249 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11250 u16 lnkctl;
11251
11252 pci_read_config_word(tp->pdev,
11253 pcie_cap + PCI_EXP_LNKCTL,
11254 &lnkctl);
11255 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
11256 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
11257 }
11258 }
11259
11260 /* If we have an AMD 762 or VIA K8T800 chipset, write
11261 * reordering to the mailbox registers done by the host
11262 * controller can cause major troubles. We read back from
11263 * every mailbox register write to force the writes to be
11264 * posted to the chip in order.
11265 */
11266 if (pci_dev_present(write_reorder_chipsets) &&
11267 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11268 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
11269
11270 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11271 tp->pci_lat_timer < 64) {
11272 tp->pci_lat_timer = 64;
11273
11274 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
11275 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
11276 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
11277 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
11278
11279 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
11280 cacheline_sz_reg);
11281 }
11282
11283 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
11284 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11285 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
11286 if (!tp->pcix_cap) {
11287 printk(KERN_ERR PFX "Cannot find PCI-X "
11288 "capability, aborting.\n");
11289 return -EIO;
11290 }
11291 }
11292
11293 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11294 &pci_state_reg);
11295
11296 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
11297 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
11298
11299 /* If this is a 5700 BX chipset, and we are in PCI-X
11300 * mode, enable register write workaround.
11301 *
11302 * The workaround is to use indirect register accesses
11303 * for all chip writes not to mailbox registers.
11304 */
11305 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
11306 u32 pm_reg;
11307
11308 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11309
/* The chip can have its power management PCI config
11311 * space registers clobbered due to this bug.
11312 * So explicitly force the chip into D0 here.
11313 */
11314 pci_read_config_dword(tp->pdev,
11315 tp->pm_cap + PCI_PM_CTRL,
11316 &pm_reg);
11317 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
11318 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
11319 pci_write_config_dword(tp->pdev,
11320 tp->pm_cap + PCI_PM_CTRL,
11321 pm_reg);
11322
11323 /* Also, force SERR#/PERR# in PCI command. */
11324 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11325 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
11326 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11327 }
11328 }
11329
11330 /* 5700 BX chips need to have their TX producer index mailboxes
 * written twice to work around a bug.
11332 */
11333 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
11334 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
11335
11336 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
11337 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
11338 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
11339 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
11340
11341 /* Chip-specific fixup from Broadcom driver */
11342 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
11343 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
11344 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
11345 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
11346 }
11347
11348 /* Default fast path register access methods */
11349 tp->read32 = tg3_read32;
11350 tp->write32 = tg3_write32;
11351 tp->read32_mbox = tg3_read32;
11352 tp->write32_mbox = tg3_write32;
11353 tp->write32_tx_mbox = tg3_write32;
11354 tp->write32_rx_mbox = tg3_write32;
11355
11356 /* Various workaround register access methods */
11357 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
11358 tp->write32 = tg3_write_indirect_reg32;
11359 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11360 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
11361 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
11362 /*
11363 * Back to back register writes can cause problems on these
11364 * chips, the workaround is to read back all reg writes
11365 * except those to mailbox regs.
11366 *
11367 * See tg3_write_indirect_reg32().
11368 */
11369 tp->write32 = tg3_write_flush_reg32;
11370 }
11371
11373 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
11374 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
11375 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11376 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
11377 tp->write32_rx_mbox = tg3_write_flush_reg32;
11378 }
11379
11380 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
11381 tp->read32 = tg3_read_indirect_reg32;
11382 tp->write32 = tg3_write_indirect_reg32;
11383 tp->read32_mbox = tg3_read_indirect_mbox;
11384 tp->write32_mbox = tg3_write_indirect_mbox;
11385 tp->write32_tx_mbox = tg3_write_indirect_mbox;
11386 tp->write32_rx_mbox = tg3_write_indirect_mbox;
11387
11388 iounmap(tp->regs);
11389 tp->regs = NULL;
11390
11391 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11392 pci_cmd &= ~PCI_COMMAND_MEMORY;
11393 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11394 }
11395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11396 tp->read32_mbox = tg3_read32_mbox_5906;
11397 tp->write32_mbox = tg3_write32_mbox_5906;
11398 tp->write32_tx_mbox = tg3_write32_mbox_5906;
11399 tp->write32_rx_mbox = tg3_write32_mbox_5906;
11400 }
11401
11402 if (tp->write32 == tg3_write_indirect_reg32 ||
11403 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11404 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11405 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
11406 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
11407
11408 /* Get eeprom hw config before calling tg3_set_power_state().
11409 * In particular, the TG3_FLG2_IS_NIC flag must be
11410 * determined before calling tg3_set_power_state() so that
11411 * we know whether or not to switch out of Vaux power.
11412 * When the flag is set, it means that GPIO1 is used for eeprom
11413 * write protect and also implies that it is a LOM where GPIOs
11414 * are not used to switch power.
11415 */
11416 tg3_get_eeprom_hw_cfg(tp);
11417
11418 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
11419 /* Allow reads and writes to the
11420 * APE register and memory space.
11421 */
11422 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
11423 PCISTATE_ALLOW_APE_SHMEM_WR;
11424 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
11425 pci_state_reg);
11426 }
11427
11428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11430 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
11431
11432 if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
11433 tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
11434 tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
11435 tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
11436 tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
11437 }
11438
11439 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
11440 * GPIO1 driven high will bring 5700's external PHY out of reset.
11441 * It is also used as eeprom write protect on LOMs.
11442 */
11443 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
11444 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11445 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
11446 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
11447 GRC_LCLCTRL_GPIO_OUTPUT1);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
/* Unused GPIO3 must be driven as output on 5752 because there
 * are no pull-up resistors on unused GPIO pins.
 */
11452 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
11453
11454 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11455 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
11456
11457 /* Force the chip into D0. */
11458 err = tg3_set_power_state(tp, PCI_D0);
11459 if (err) {
11460 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
11461 pci_name(tp->pdev));
11462 return err;
11463 }
11464
11465 /* 5700 B0 chips do not support checksumming correctly due
11466 * to hardware bugs.
11467 */
11468 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11469 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11470
11471 /* Derive initial jumbo mode from MTU assigned in
11472 * ether_setup() via the alloc_etherdev() call
11473 */
11474 if (tp->dev->mtu > ETH_DATA_LEN &&
11475 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11476 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
11477
11478 /* Determine WakeOnLan speed to use. */
11479 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11480 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11481 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
11482 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
11483 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
11484 } else {
11485 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
11486 }
11487
11488 /* A few boards don't want Ethernet@WireSpeed phy feature */
11489 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
11490 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
11491 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
11492 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
11493 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
11494 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
11495 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
11496
11497 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
11498 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
11499 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
11500 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
11501 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
11502
11503 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11504 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11505 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11506 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
11508 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
11509 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
11510 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
11511 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
11512 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
11513 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
11514 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
11515 }
11516
11517 tp->coalesce_mode = 0;
11518 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
11519 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
11520 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
11521
11522 /* Initialize MAC MI mode, polling disabled. */
11523 tw32_f(MAC_MI_MODE, tp->mi_mode);
11524 udelay(80);
11525
11526 /* Initialize data/descriptor byte/word swapping. */
11527 val = tr32(GRC_MODE);
11528 val &= GRC_MODE_HOST_STACKUP;
11529 tw32(GRC_MODE, val | tp->grc_mode);
11530
11531 tg3_switch_clocks(tp);
11532
11533 /* Clear this out for sanity. */
11534 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
11535
11536 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
11537 &pci_state_reg);
11538 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
11539 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
11540 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
11541
11542 if (chiprevid == CHIPREV_ID_5701_A0 ||
11543 chiprevid == CHIPREV_ID_5701_B0 ||
11544 chiprevid == CHIPREV_ID_5701_B2 ||
11545 chiprevid == CHIPREV_ID_5701_B5) {
11546 void __iomem *sram_base;
11547
/* Write some dummy words into the SRAM status block
 * area and see if they read back correctly.  If not,
 * force-enable the PCI-X workaround.
 */
11552 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
11553
11554 writel(0x00000000, sram_base);
11555 writel(0x00000000, sram_base + 4);
11556 writel(0xffffffff, sram_base + 4);
11557 if (readl(sram_base) != 0x00000000)
11558 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
11559 }
11560 }
11561
11562 udelay(50);
11563 tg3_nvram_init(tp);
11564
11565 grc_misc_cfg = tr32(GRC_MISC_CFG);
11566 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
11567
11568 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11569 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
11570 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
11571 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
11572
11573 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
11574 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
11575 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
11576 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
11577 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
11578 HOSTCC_MODE_CLRTICK_TXBD);
11579
11580 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
11581 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11582 tp->misc_host_ctrl);
11583 }
11584
11585 /* these are limited to 10/100 only */
11586 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
11587 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
11588 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
11589 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11590 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
11591 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
11592 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
11593 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
11594 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
11595 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
11596 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
11597 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11598 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
11599
11600 err = tg3_phy_probe(tp);
11601 if (err) {
11602 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
11603 pci_name(tp->pdev), err);
11604 /* ... but do not return immediately ... */
11605 }
11606
11607 tg3_read_partno(tp);
11608 tg3_read_fw_ver(tp);
11609
11610 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
11611 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11612 } else {
11613 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11614 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
11615 else
11616 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
11617 }
11618
11619 /* 5700 {AX,BX} chips have a broken status block link
11620 * change bit implementation, so we must use the
11621 * status register in those cases.
11622 */
11623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
11624 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
11625 else
11626 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
11627
/* The led_ctrl is set during tg3_phy_probe; here we might
11629 * have to force the link status polling mechanism based
11630 * upon subsystem IDs.
11631 */
11632 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
11633 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11634 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
11635 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
11636 TG3_FLAG_USE_LINKCHG_REG);
11637 }
11638
11639 /* For all SERDES we poll the MAC status register. */
11640 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
11641 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
11642 else
11643 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
11644
11645 /* All chips before 5787 can get confused if TX buffers
11646 * straddle the 4GB address boundary in some cases.
11647 */
11648 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11649 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11650 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11651 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11652 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11653 tp->dev->hard_start_xmit = tg3_start_xmit;
11654 else
11655 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
11656
11657 tp->rx_offset = 2;
11658 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
11659 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
11660 tp->rx_offset = 0;
11661
11662 tp->rx_std_max_post = TG3_RX_RING_SIZE;
11663
11664 /* Increment the rx prod index on the rx std ring by at most
 * 8 for these chips to work around hw errata.
11666 */
11667 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11668 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11670 tp->rx_std_max_post = 8;
11671
11672 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
11673 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
11674 PCIE_PWR_MGMT_L1_THRESH_MSK;
11675
11676 return err;
11677 }
11678
11679 #ifdef CONFIG_SPARC
11680 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
11681 {
11682 struct net_device *dev = tp->dev;
11683 struct pci_dev *pdev = tp->pdev;
11684 struct device_node *dp = pci_device_to_OF_node(pdev);
11685 const unsigned char *addr;
11686 int len;
11687
11688 addr = of_get_property(dp, "local-mac-address", &len);
11689 if (addr && len == 6) {
11690 memcpy(dev->dev_addr, addr, 6);
11691 memcpy(dev->perm_addr, dev->dev_addr, 6);
11692 return 0;
11693 }
11694 return -ENODEV;
11695 }
11696
11697 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
11698 {
11699 struct net_device *dev = tp->dev;
11700
11701 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
11702 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
11703 return 0;
11704 }
11705 #endif
11706
11707 static int __devinit tg3_get_device_address(struct tg3 *tp)
11708 {
11709 struct net_device *dev = tp->dev;
11710 u32 hi, lo, mac_offset;
11711 int addr_ok = 0;
11712
11713 #ifdef CONFIG_SPARC
11714 if (!tg3_get_macaddr_sparc(tp))
11715 return 0;
11716 #endif
11717
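/* NVRAM offset of the MAC address: 0x7c by default, 0xcc for the
 * second port of dual-MAC (5704/5780-class) devices, and 0x10 on
 * the 5906.
 */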
11718 mac_offset = 0x7c;
11719 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11720 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11721 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
11722 mac_offset = 0xcc;
11723 if (tg3_nvram_lock(tp))
11724 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
11725 else
11726 tg3_nvram_unlock(tp);
11727 }
11728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11729 mac_offset = 0x10;
11730
11731 /* First try to get it from MAC address mailbox. */
11732 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
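/* Bootcode stores 0x484b ("HK") in the upper half of the high
 * mailbox word to signal that a valid MAC address follows.
 */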
11733 if ((hi >> 16) == 0x484b) {
11734 dev->dev_addr[0] = (hi >> 8) & 0xff;
11735 dev->dev_addr[1] = (hi >> 0) & 0xff;
11736
11737 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
11738 dev->dev_addr[2] = (lo >> 24) & 0xff;
11739 dev->dev_addr[3] = (lo >> 16) & 0xff;
11740 dev->dev_addr[4] = (lo >> 8) & 0xff;
11741 dev->dev_addr[5] = (lo >> 0) & 0xff;
11742
11743 /* Some old bootcode may report a 0 MAC address in SRAM */
11744 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
11745 }
11746 if (!addr_ok) {
11747 /* Next, try NVRAM. */
11748 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
11749 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
11750 dev->dev_addr[0] = ((hi >> 16) & 0xff);
11751 dev->dev_addr[1] = ((hi >> 24) & 0xff);
11752 dev->dev_addr[2] = ((lo >> 0) & 0xff);
11753 dev->dev_addr[3] = ((lo >> 8) & 0xff);
11754 dev->dev_addr[4] = ((lo >> 16) & 0xff);
11755 dev->dev_addr[5] = ((lo >> 24) & 0xff);
} else {
/* Finally just fetch it out of the MAC control regs. */
11759 hi = tr32(MAC_ADDR_0_HIGH);
11760 lo = tr32(MAC_ADDR_0_LOW);
11761
11762 dev->dev_addr[5] = lo & 0xff;
11763 dev->dev_addr[4] = (lo >> 8) & 0xff;
11764 dev->dev_addr[3] = (lo >> 16) & 0xff;
11765 dev->dev_addr[2] = (lo >> 24) & 0xff;
11766 dev->dev_addr[1] = hi & 0xff;
11767 dev->dev_addr[0] = (hi >> 8) & 0xff;
11768 }
11769 }
11770
11771 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
11773 if (!tg3_get_default_macaddr_sparc(tp))
11774 return 0;
11775 #endif
11776 return -EINVAL;
11777 }
11778 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
11779 return 0;
11780 }
11781
11782 #define BOUNDARY_SINGLE_CACHELINE 1
11783 #define BOUNDARY_MULTI_CACHELINE 2
11784
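/* Compute the DMA read/write boundary bits for TG3PCI_DMA_RW_CTRL
 * based on the host cacheline size, the bus type (PCI, PCI-X or PCI
 * Express) and whether the platform prefers DMA bursts confined to a
 * single cacheline.
 */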
11785 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
11786 {
11787 int cacheline_size;
11788 u8 byte;
11789 int goal;
11790
11791 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
11792 if (byte == 0)
11793 cacheline_size = 1024;
11794 else
11795 cacheline_size = (int) byte * 4;
11796
11797 /* On 5703 and later chips, the boundary bits have no
11798 * effect.
11799 */
11800 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11801 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
11802 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
11803 goto out;
11804
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
goal = BOUNDARY_MULTI_CACHELINE;
#elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
goal = BOUNDARY_SINGLE_CACHELINE;
#else
goal = 0;
#endif
11814
11815 if (!goal)
11816 goto out;
11817
11818 /* PCI controllers on most RISC systems tend to disconnect
11819 * when a device tries to burst across a cache-line boundary.
11820 * Therefore, letting tg3 do so just wastes PCI bandwidth.
11821 *
11822 * Unfortunately, for PCI-E there are only limited
11823 * write-side controls for this, and thus for reads
11824 * we will still get the disconnects. We'll also waste
11825 * these PCI cycles for both read and write for chips
11826 * other than 5700 and 5701 which do not implement the
11827 * boundary bits.
11828 */
11829 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
11830 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
11831 switch (cacheline_size) {
11832 case 16:
11833 case 32:
11834 case 64:
11835 case 128:
11836 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11837 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
11838 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
11839 } else {
11840 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11841 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11842 }
11843 break;
11844
11845 case 256:
11846 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
11847 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
11848 break;
11849
11850 default:
11851 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
11852 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
11853 break;
}
11855 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11856 switch (cacheline_size) {
11857 case 16:
11858 case 32:
11859 case 64:
11860 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11861 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11862 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
11863 break;
11864 }
11865 /* fallthrough */
11866 case 128:
11867 default:
11868 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
11869 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
11870 break;
}
11872 } else {
11873 switch (cacheline_size) {
11874 case 16:
11875 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11876 val |= (DMA_RWCTRL_READ_BNDRY_16 |
11877 DMA_RWCTRL_WRITE_BNDRY_16);
11878 break;
11879 }
11880 /* fallthrough */
11881 case 32:
11882 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11883 val |= (DMA_RWCTRL_READ_BNDRY_32 |
11884 DMA_RWCTRL_WRITE_BNDRY_32);
11885 break;
11886 }
11887 /* fallthrough */
11888 case 64:
11889 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11890 val |= (DMA_RWCTRL_READ_BNDRY_64 |
11891 DMA_RWCTRL_WRITE_BNDRY_64);
11892 break;
11893 }
11894 /* fallthrough */
11895 case 128:
11896 if (goal == BOUNDARY_SINGLE_CACHELINE) {
11897 val |= (DMA_RWCTRL_READ_BNDRY_128 |
11898 DMA_RWCTRL_WRITE_BNDRY_128);
11899 break;
11900 }
11901 /* fallthrough */
11902 case 256:
11903 val |= (DMA_RWCTRL_READ_BNDRY_256 |
11904 DMA_RWCTRL_WRITE_BNDRY_256);
11905 break;
11906 case 512:
11907 val |= (DMA_RWCTRL_READ_BNDRY_512 |
11908 DMA_RWCTRL_WRITE_BNDRY_512);
11909 break;
11910 case 1024:
11911 default:
11912 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
11913 DMA_RWCTRL_WRITE_BNDRY_1024);
11914 break;
}
11916 }
11917
11918 out:
11919 return val;
11920 }
11921
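/* Run a single DMA transaction through the chip: build an internal
 * buffer descriptor in NIC SRAM, kick the read (host to device) or
 * write (device to host) DMA engine via its FTQ, and poll the
 * matching completion FIFO until the descriptor comes back.
 */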
11922 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
11923 {
11924 struct tg3_internal_buffer_desc test_desc;
11925 u32 sram_dma_descs;
11926 int i, ret;
11927
11928 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
11929
11930 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
11931 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
11932 tw32(RDMAC_STATUS, 0);
11933 tw32(WDMAC_STATUS, 0);
11934
11935 tw32(BUFMGR_MODE, 0);
11936 tw32(FTQ_RESET, 0);
11937
11938 test_desc.addr_hi = ((u64) buf_dma) >> 32;
11939 test_desc.addr_lo = buf_dma & 0xffffffff;
11940 test_desc.nic_mbuf = 0x00002100;
11941 test_desc.len = size;
11942
11943 /*
 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
11945 * the *second* time the tg3 driver was getting loaded after an
11946 * initial scan.
11947 *
11948 * Broadcom tells me:
11949 * ...the DMA engine is connected to the GRC block and a DMA
11950 * reset may affect the GRC block in some unpredictable way...
11951 * The behavior of resets to individual blocks has not been tested.
11952 *
11953 * Broadcom noted the GRC reset will also reset all sub-components.
11954 */
11955 if (to_device) {
11956 test_desc.cqid_sqid = (13 << 8) | 2;
11957
11958 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
11959 udelay(40);
11960 } else {
11961 test_desc.cqid_sqid = (16 << 8) | 7;
11962
11963 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
11964 udelay(40);
11965 }
11966 test_desc.flags = 0x00000005;
11967
11968 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
11969 u32 val;
11970
11971 val = *(((u32 *)&test_desc) + i);
11972 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
11973 sram_dma_descs + (i * sizeof(u32)));
11974 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
11975 }
11976 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
11977
11978 if (to_device) {
11979 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
11980 } else {
11981 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
11982 }
11983
11984 ret = -ENODEV;
11985 for (i = 0; i < 40; i++) {
11986 u32 val;
11987
11988 if (to_device)
11989 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
11990 else
11991 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
11992 if ((val & 0xffff) == sram_dma_descs) {
11993 ret = 0;
11994 break;
11995 }
11996
11997 udelay(100);
11998 }
11999
12000 return ret;
12001 }
12002
12003 #define TEST_BUFFER_SIZE 0x2000
12004
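/* Tune TG3PCI_DMA_RW_CTRL for this chip/bus combination, then, on
 * 5700/5701 only, bounce a test pattern through the chip to catch the
 * write-DMA bug; if the pattern comes back corrupted, fall back to a
 * 16-byte write boundary.
 */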
12005 static int __devinit tg3_test_dma(struct tg3 *tp)
12006 {
12007 dma_addr_t buf_dma;
12008 u32 *buf, saved_dma_rwctrl;
12009 int ret;
12010
12011 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12012 if (!buf) {
12013 ret = -ENOMEM;
12014 goto out_nofree;
12015 }
12016
12017 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12018 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12019
12020 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12021
12022 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12023 /* DMA read watermark not used on PCIE */
12024 tp->dma_rwctrl |= 0x00180000;
12025 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12027 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12028 tp->dma_rwctrl |= 0x003f0000;
12029 else
12030 tp->dma_rwctrl |= 0x003f000f;
12031 } else {
12032 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12034 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12035 u32 read_water = 0x7;
12036
12037 /* If the 5704 is behind the EPB bridge, we can
12038 * do the less restrictive ONE_DMA workaround for
12039 * better performance.
12040 */
12041 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12042 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12043 tp->dma_rwctrl |= 0x8000;
12044 else if (ccval == 0x6 || ccval == 0x7)
12045 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12046
12047 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12048 read_water = 4;
12049 /* Set bit 23 to enable PCIX hw bug fix */
12050 tp->dma_rwctrl |=
12051 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12052 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12053 (1 << 23);
12054 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12055 /* 5780 always in PCIX mode */
12056 tp->dma_rwctrl |= 0x00144000;
12057 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12058 /* 5714 always in PCIX mode */
12059 tp->dma_rwctrl |= 0x00148000;
12060 } else {
12061 tp->dma_rwctrl |= 0x001b000f;
12062 }
12063 }
12064
12065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12066 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12067 tp->dma_rwctrl &= 0xfffffff0;
12068
12069 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12070 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12071 /* Remove this if it causes problems for some boards. */
12072 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12073
12074 /* On 5700/5701 chips, we need to set this bit.
12075 * Otherwise the chip will issue cacheline transactions
12076 * to streamable DMA memory with not all the byte
12077 * enables turned on. This is an error on several
12078 * RISC PCI controllers, in particular sparc64.
12079 *
12080 * On 5703/5704 chips, this bit has been reassigned
12081 * a different meaning. In particular, it is used
12082 * on those chips to enable a PCI-X workaround.
12083 */
12084 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12085 }
12086
12087 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12088
12089 #if 0
12090 /* Unneeded, already done by tg3_get_invariants. */
12091 tg3_switch_clocks(tp);
12092 #endif
12093
12094 ret = 0;
12095 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12096 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12097 goto out;
12098
/* It is best to perform the DMA test with maximum write burst size
12100 * to expose the 5700/5701 write DMA bug.
12101 */
12102 saved_dma_rwctrl = tp->dma_rwctrl;
12103 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12104 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12105
12106 while (1) {
12107 u32 *p = buf, i;
12108
12109 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12110 p[i] = i;
12111
12112 /* Send the buffer to the chip. */
12113 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12114 if (ret) {
printk(KERN_ERR "tg3_test_dma() write of the buffer failed %d\n", ret);
12116 break;
12117 }
12118
12119 #if 0
12120 /* validate data reached card RAM correctly. */
12121 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12122 u32 val;
12123 tg3_read_mem(tp, 0x2100 + (i*4), &val);
12124 if (le32_to_cpu(val) != p[i]) {
12125 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
12126 /* ret = -ENODEV here? */
12127 }
12128 p[i] = 0;
12129 }
12130 #endif
12131 /* Now read it back. */
12132 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12133 if (ret) {
printk(KERN_ERR "tg3_test_dma() read of the buffer failed %d\n", ret);
12135
12136 break;
12137 }
12138
12139 /* Verify it. */
12140 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12141 if (p[i] == i)
12142 continue;
12143
12144 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12145 DMA_RWCTRL_WRITE_BNDRY_16) {
12146 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12147 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12148 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12149 break;
12150 } else {
12151 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12152 ret = -ENODEV;
12153 goto out;
12154 }
12155 }
12156
12157 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12158 /* Success. */
12159 ret = 0;
12160 break;
12161 }
12162 }
12163 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12164 DMA_RWCTRL_WRITE_BNDRY_16) {
12165 static struct pci_device_id dma_wait_state_chipsets[] = {
12166 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12167 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12168 { },
12169 };
12170
/* DMA test passed without adjusting the DMA boundary;
12172 * now look for chipsets that are known to expose the
12173 * DMA bug without failing the test.
12174 */
12175 if (pci_dev_present(dma_wait_state_chipsets)) {
12176 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12177 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
} else
12180 /* Safe to use the calculated DMA boundary. */
12181 tp->dma_rwctrl = saved_dma_rwctrl;
12182
12183 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12184 }
12185
12186 out:
12187 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12188 out_nofree:
12189 return ret;
12190 }
12191
12192 static void __devinit tg3_init_link_config(struct tg3 *tp)
12193 {
12194 tp->link_config.advertising =
12195 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12196 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12197 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12198 ADVERTISED_Autoneg | ADVERTISED_MII);
12199 tp->link_config.speed = SPEED_INVALID;
12200 tp->link_config.duplex = DUPLEX_INVALID;
12201 tp->link_config.autoneg = AUTONEG_ENABLE;
12202 tp->link_config.active_speed = SPEED_INVALID;
12203 tp->link_config.active_duplex = DUPLEX_INVALID;
12204 tp->link_config.phy_is_low_power = 0;
12205 tp->link_config.orig_speed = SPEED_INVALID;
12206 tp->link_config.orig_duplex = DUPLEX_INVALID;
12207 tp->link_config.orig_autoneg = AUTONEG_INVALID;
12208 }
12209
12210 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
12211 {
12212 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12213 tp->bufmgr_config.mbuf_read_dma_low_water =
12214 DEFAULT_MB_RDMA_LOW_WATER_5705;
12215 tp->bufmgr_config.mbuf_mac_rx_low_water =
12216 DEFAULT_MB_MACRX_LOW_WATER_5705;
12217 tp->bufmgr_config.mbuf_high_water =
12218 DEFAULT_MB_HIGH_WATER_5705;
12219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12220 tp->bufmgr_config.mbuf_mac_rx_low_water =
12221 DEFAULT_MB_MACRX_LOW_WATER_5906;
12222 tp->bufmgr_config.mbuf_high_water =
12223 DEFAULT_MB_HIGH_WATER_5906;
12224 }
12225
12226 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12227 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
12228 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12229 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
12230 tp->bufmgr_config.mbuf_high_water_jumbo =
12231 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
12232 } else {
12233 tp->bufmgr_config.mbuf_read_dma_low_water =
12234 DEFAULT_MB_RDMA_LOW_WATER;
12235 tp->bufmgr_config.mbuf_mac_rx_low_water =
12236 DEFAULT_MB_MACRX_LOW_WATER;
12237 tp->bufmgr_config.mbuf_high_water =
12238 DEFAULT_MB_HIGH_WATER;
12239
12240 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
12241 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
12242 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
12243 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
12244 tp->bufmgr_config.mbuf_high_water_jumbo =
12245 DEFAULT_MB_HIGH_WATER_JUMBO;
12246 }
12247
12248 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
12249 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
12250 }
12251
12252 static char * __devinit tg3_phy_string(struct tg3 *tp)
12253 {
12254 switch (tp->phy_id & PHY_ID_MASK) {
12255 case PHY_ID_BCM5400: return "5400";
12256 case PHY_ID_BCM5401: return "5401";
12257 case PHY_ID_BCM5411: return "5411";
12258 case PHY_ID_BCM5701: return "5701";
12259 case PHY_ID_BCM5703: return "5703";
12260 case PHY_ID_BCM5704: return "5704";
12261 case PHY_ID_BCM5705: return "5705";
12262 case PHY_ID_BCM5750: return "5750";
12263 case PHY_ID_BCM5752: return "5752";
12264 case PHY_ID_BCM5714: return "5714";
12265 case PHY_ID_BCM5780: return "5780";
12266 case PHY_ID_BCM5755: return "5755";
12267 case PHY_ID_BCM5787: return "5787";
12268 case PHY_ID_BCM5784: return "5784";
12269 case PHY_ID_BCM5756: return "5722/5756";
12270 case PHY_ID_BCM5906: return "5906";
12271 case PHY_ID_BCM5761: return "5761";
12272 case PHY_ID_BCM8002: return "8002/serdes";
12273 case 0: return "serdes";
12274 default: return "unknown";
}
12276 }
12277
12278 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
12279 {
12280 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12281 strcpy(str, "PCI Express");
12282 return str;
12283 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12284 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
12285
12286 strcpy(str, "PCIX:");
12287
12288 if ((clock_ctrl == 7) ||
12289 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
12290 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
12291 strcat(str, "133MHz");
12292 else if (clock_ctrl == 0)
12293 strcat(str, "33MHz");
12294 else if (clock_ctrl == 2)
12295 strcat(str, "50MHz");
12296 else if (clock_ctrl == 4)
12297 strcat(str, "66MHz");
12298 else if (clock_ctrl == 6)
12299 strcat(str, "100MHz");
12300 } else {
12301 strcpy(str, "PCI:");
12302 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
12303 strcat(str, "66MHz");
12304 else
12305 strcat(str, "33MHz");
12306 }
12307 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
12308 strcat(str, ":32-bit");
12309 else
12310 strcat(str, ":64-bit");
12311 return str;
12312 }
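/*
 * Equivalent lookup form of the PCI-X clock decode above, shown as an
 * illustrative restatement only (values are taken straight from the
 * checks in tg3_bus_string(); note the real code also reports 133MHz
 * unconditionally for the 5704 CIOBE board):
 */
static const char *tg3_pcix_clock_str(u32 clock_ctrl)
{
	switch (clock_ctrl & 0x1f) {
	case 0:	return "33MHz";
	case 2:	return "50MHz";
	case 4:	return "66MHz";
	case 6:	return "100MHz";
	case 7:	return "133MHz";
	default: return "unknown";
	}
}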
12313
12314 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
12315 {
12316 struct pci_dev *peer;
12317 unsigned int func, devnr = tp->pdev->devfn & ~7;
12318
12319 for (func = 0; func < 8; func++) {
12320 peer = pci_get_slot(tp->pdev->bus, devnr | func);
12321 if (peer && peer != tp->pdev)
12322 break;
12323 pci_dev_put(peer);
12324 }
12325 /* 5704 can be configured in single-port mode; set peer to
12326 * tp->pdev in that case.
12327 */
12328 if (!peer) {
12329 peer = tp->pdev;
12330 return peer;
12331 }
12332
12333 /*
12334 * We don't need to keep the refcount elevated; there's no way
12335 * to remove one half of this device without removing the other.
12336 */
12337 pci_dev_put(peer);
12338
12339 return peer;
12340 }
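/*
 * Note on the devfn arithmetic above: the low 3 bits of devfn encode
 * the PCI function number, so masking with ~7 selects function 0 of
 * the slot and OR-ing in func walks functions 0-7. An equivalent,
 * illustrative helper (the kernel's PCI_DEVFN()/PCI_FUNC() macros
 * express the same arithmetic):
 */
static unsigned int tg3_peer_devfn(unsigned int devfn, unsigned int func)
{
	return (devfn & ~7) | (func & 7);
}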
12341
12342 static void __devinit tg3_init_coal(struct tg3 *tp)
12343 {
12344 struct ethtool_coalesce *ec = &tp->coal;
12345
12346 memset(ec, 0, sizeof(*ec));
12347 ec->cmd = ETHTOOL_GCOALESCE;
12348 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
12349 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
12350 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
12351 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
12352 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
12353 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
12354 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
12355 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
12356 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
12357
12358 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
12359 HOSTCC_MODE_CLRTICK_TXBD)) {
12360 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
12361 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
12362 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
12363 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
12364 }
12365
12366 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12367 ec->rx_coalesce_usecs_irq = 0;
12368 ec->tx_coalesce_usecs_irq = 0;
12369 ec->stats_block_coalesce_usecs = 0;
12370 }
12371 }
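/*
 * The defaults filled in above are exactly what userspace reads back
 * with "ethtool -c <dev>". A minimal standalone sketch of that query
 * via the SIOCETHTOOL ioctl follows; it is not part of the driver,
 * "eth0" is an assumed interface name, and error handling is
 * abbreviated.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *) &ec;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx-usecs %u rx-frames %u tx-usecs %u\n",
		       ec.rx_coalesce_usecs, ec.rx_max_coalesced_frames,
		       ec.tx_coalesce_usecs);
	close(fd);
	return 0;
}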
12372
12373 static int __devinit tg3_init_one(struct pci_dev *pdev,
12374 const struct pci_device_id *ent)
12375 {
12376 static int tg3_version_printed;
12377 unsigned long tg3reg_base, tg3reg_len;
12378 struct net_device *dev;
12379 struct tg3 *tp;
12380 int err, pm_cap;
12381 char str[40];
12382 u64 dma_mask, persist_dma_mask;
12383 DECLARE_MAC_BUF(mac);
12384
12385 if (tg3_version_printed++ == 0)
12386 printk(KERN_INFO "%s", version);
12387
12388 err = pci_enable_device(pdev);
12389 if (err) {
12390 printk(KERN_ERR PFX "Cannot enable PCI device, "
12391 "aborting.\n");
12392 return err;
12393 }
12394
12395 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12396 printk(KERN_ERR PFX "Cannot find proper PCI device "
12397 "base address, aborting.\n");
12398 err = -ENODEV;
12399 goto err_out_disable_pdev;
12400 }
12401
12402 err = pci_request_regions(pdev, DRV_MODULE_NAME);
12403 if (err) {
12404 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
12405 "aborting.\n");
12406 goto err_out_disable_pdev;
12407 }
12408
12409 pci_set_master(pdev);
12410
12411 /* Find power-management capability. */
12412 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12413 if (pm_cap == 0) {
12414 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
12415 "aborting.\n");
12416 err = -EIO;
12417 goto err_out_free_res;
12418 }
12419
12420 tg3reg_base = pci_resource_start(pdev, 0);
12421 tg3reg_len = pci_resource_len(pdev, 0);
12422
12423 dev = alloc_etherdev(sizeof(*tp));
12424 if (!dev) {
12425 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
12426 err = -ENOMEM;
12427 goto err_out_free_res;
12428 }
12429
12430 SET_NETDEV_DEV(dev, &pdev->dev);
12431
12432 #if TG3_VLAN_TAG_USED
12433 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
12434 dev->vlan_rx_register = tg3_vlan_rx_register;
12435 #endif
12436
12437 tp = netdev_priv(dev);
12438 tp->pdev = pdev;
12439 tp->dev = dev;
12440 tp->pm_cap = pm_cap;
12441 tp->mac_mode = TG3_DEF_MAC_MODE;
12442 tp->rx_mode = TG3_DEF_RX_MODE;
12443 tp->tx_mode = TG3_DEF_TX_MODE;
12444 tp->mi_mode = MAC_MI_MODE_BASE;
12445 if (tg3_debug > 0)
12446 tp->msg_enable = tg3_debug;
12447 else
12448 tp->msg_enable = TG3_DEF_MSG_ENABLE;
12449
12450 /* The word/byte swap controls here control register access byte
12451 * swapping. DMA data byte swapping is controlled in the GRC_MODE
12452 * setting below.
12453 */
12454 tp->misc_host_ctrl =
12455 MISC_HOST_CTRL_MASK_PCI_INT |
12456 MISC_HOST_CTRL_WORD_SWAP |
12457 MISC_HOST_CTRL_INDIR_ACCESS |
12458 MISC_HOST_CTRL_PCISTATE_RW;
12459
12460 /* The NONFRM (non-frame) byte/word swap controls take effect
12461 * on descriptor entries, anything which isn't packet data.
12462 *
12463 * The StrongARM chips on the board (one for tx, one for rx)
12464 * are running in big-endian mode.
12465 */
12466 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
12467 GRC_MODE_WSWAP_NONFRM_DATA);
12468 #ifdef __BIG_ENDIAN
12469 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
12470 #endif
12471 spin_lock_init(&tp->lock);
12472 spin_lock_init(&tp->indirect_lock);
12473 INIT_WORK(&tp->reset_task, tg3_reset_task);
12474
12475 tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
12476 if (!tp->regs) {
12477 printk(KERN_ERR PFX "Cannot map device registers, "
12478 "aborting.\n");
12479 err = -ENOMEM;
12480 goto err_out_free_dev;
12481 }
12482
12483 tg3_init_link_config(tp);
12484
12485 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
12486 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
12487 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
12488
12489 dev->open = tg3_open;
12490 dev->stop = tg3_close;
12491 dev->get_stats = tg3_get_stats;
12492 dev->set_multicast_list = tg3_set_rx_mode;
12493 dev->set_mac_address = tg3_set_mac_addr;
12494 dev->do_ioctl = tg3_ioctl;
12495 dev->tx_timeout = tg3_tx_timeout;
12496 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
12497 dev->ethtool_ops = &tg3_ethtool_ops;
12498 dev->watchdog_timeo = TG3_TX_TIMEOUT;
12499 dev->change_mtu = tg3_change_mtu;
12500 dev->irq = pdev->irq;
12501 #ifdef CONFIG_NET_POLL_CONTROLLER
12502 dev->poll_controller = tg3_poll_controller;
12503 #endif
12504
12505 err = tg3_get_invariants(tp);
12506 if (err) {
12507 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
12508 "aborting.\n");
12509 goto err_out_iounmap;
12510 }
12511
12512 /* The EPB bridge inside 5714, 5715, and 5780 and any
12513 * device behind the EPB cannot support DMA addresses > 40-bit.
12514 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
12515 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
12516 * do DMA address check in tg3_start_xmit().
12517 */
12518 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
12519 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
12520 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
12521 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
12522 #ifdef CONFIG_HIGHMEM
12523 dma_mask = DMA_64BIT_MASK;
12524 #endif
12525 } else
12526 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
12527
12528 /* Configure DMA attributes. */
12529 if (dma_mask > DMA_32BIT_MASK) {
12530 err = pci_set_dma_mask(pdev, dma_mask);
12531 if (!err) {
12532 dev->features |= NETIF_F_HIGHDMA;
12533 err = pci_set_consistent_dma_mask(pdev,
12534 persist_dma_mask);
12535 if (err < 0) {
12536 printk(KERN_ERR PFX "Unable to obtain 64-bit "
12537 "DMA for consistent allocations\n");
12538 goto err_out_iounmap;
12539 }
12540 }
12541 }
12542 if (err || dma_mask == DMA_32BIT_MASK) {
12543 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
12544 if (err) {
12545 printk(KERN_ERR PFX "No usable DMA configuration, "
12546 "aborting.\n");
12547 goto err_out_iounmap;
12548 }
12549 }
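/*
 * Editorial note: DMA_nnBIT_MASK is just the all-ones value
 * (1ULL << nn) - 1, so DMA_40BIT_MASK == 0x000000ffffffffffULL and bus
 * addresses at or above 1 TiB are unreachable behind the EPB bridge.
 * That is why the streaming dma_mask may be widened to 64 bits under
 * CONFIG_HIGHMEM while persist_dma_mask (consistent allocations) stays
 * at 40 bits: per the comment above, out-of-range streaming buffers
 * are caught by the DMA address check in tg3_start_xmit(), but
 * coherent rings must always be reachable.
 */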
12550
12551 tg3_init_bufmgr_config(tp);
12552
12553 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12554 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
12555 }
12556 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12557 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12558 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
12559 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12560 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
12561 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
12562 } else {
12563 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
12564 }
12565
12566 /* TSO is on by default on chips that support hardware TSO.
12567 * Firmware TSO on older chips gives lower performance, so it
12568 * is off by default, but can be enabled using ethtool.
12569 */
12570 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
12571 dev->features |= NETIF_F_TSO;
12572 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
12573 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
12574 dev->features |= NETIF_F_TSO6;
12575 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12576 dev->features |= NETIF_F_TSO_ECN;
12577 }
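/*
 * Editorial note: NETIF_F_TSO is only set here for hardware-TSO chips.
 * On firmware-TSO chips (TSO_CAPABLE without HW_TSO above) it stays
 * off by default, but "ethtool -K <dev> tso on" can still enable it at
 * runtime via the driver's .set_tso ethtool operation.
 */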
12578
12580 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
12581 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
12582 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
12583 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
12584 tp->rx_pending = 63;
12585 }
12586
12587 err = tg3_get_device_address(tp);
12588 if (err) {
12589 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
12590 "aborting.\n");
12591 goto err_out_iounmap;
12592 }
12593
12594 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12595 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12596 printk(KERN_ERR PFX "Cannot find proper PCI device "
12597 "base address for APE, aborting.\n");
12598 err = -ENODEV;
12599 goto err_out_iounmap;
12600 }
12601
12602 tg3reg_base = pci_resource_start(pdev, 2);
12603 tg3reg_len = pci_resource_len(pdev, 2);
12604
12605 tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
12606 if (!tp->aperegs) {
12607 printk(KERN_ERR PFX "Cannot map APE registers, "
12608 "aborting.\n");
12609 err = -ENOMEM;
12610 goto err_out_iounmap;
12611 }
12612
12613 tg3_ape_lock_init(tp);
12614 }
12615
12616 /*
12617 * Reset chip in case UNDI or EFI driver did not shut it down.
12618 * Otherwise the DMA self test will enable WDMAC and we'll see
12619 * (spurious) pending DMA on the PCI bus at that point.
12620 */
12621 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
12622 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
12623 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
12624 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12625 }
12626
12627 err = tg3_test_dma(tp);
12628 if (err) {
12629 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
12630 goto err_out_apeunmap;
12631 }
12632
12633 /* Tigon3 can only checksum IPv4 (some newer chips also handle
12634 * IPv6), and some chips have buggy checksumming.
12635 */
12636 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
12637 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
12638 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12639 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12640 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12642 dev->features |= NETIF_F_IPV6_CSUM;
12643
12644 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12645 } else
12646 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
12647
12648 /* flow control autonegotiation is the default behavior */
12649 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
12650 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
12651
12652 tg3_init_coal(tp);
12653
12654 pci_set_drvdata(pdev, dev);
12655
12656 err = register_netdev(dev);
12657 if (err) {
12658 printk(KERN_ERR PFX "Cannot register net device, "
12659 "aborting.\n");
12660 goto err_out_apeunmap;
12661 }
12662
12663 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] "
12664 "(%s) %s Ethernet %s\n",
12665 dev->name,
12666 tp->board_part_number,
12667 tp->pci_chip_rev_id,
12668 tg3_phy_string(tp),
12669 tg3_bus_string(tp, str),
12670 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
12671 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
12672 "10/100/1000Base-T")),
12673 print_mac(mac, dev->dev_addr));
12674
12675 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
12676 "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
12677 dev->name,
12678 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
12679 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
12680 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
12681 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
12682 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
12683 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
12684 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
12685 dev->name, tp->dma_rwctrl,
12686 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
12687 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
12688
12689 return 0;
12690
12691 err_out_apeunmap:
12692 if (tp->aperegs) {
12693 iounmap(tp->aperegs);
12694 tp->aperegs = NULL;
12695 }
12696
12697 err_out_iounmap:
12698 if (tp->regs) {
12699 iounmap(tp->regs);
12700 tp->regs = NULL;
12701 }
12702
12703 err_out_free_dev:
12704 free_netdev(dev);
12705
12706 err_out_free_res:
12707 pci_release_regions(pdev);
12708
12709 err_out_disable_pdev:
12710 pci_disable_device(pdev);
12711 pci_set_drvdata(pdev, NULL);
12712 return err;
12713 }
12714
12715 static void __devexit tg3_remove_one(struct pci_dev *pdev)
12716 {
12717 struct net_device *dev = pci_get_drvdata(pdev);
12718
12719 if (dev) {
12720 struct tg3 *tp = netdev_priv(dev);
12721
12722 flush_scheduled_work();
12723 unregister_netdev(dev);
12724 if (tp->aperegs) {
12725 iounmap(tp->aperegs);
12726 tp->aperegs = NULL;
12727 }
12728 if (tp->regs) {
12729 iounmap(tp->regs);
12730 tp->regs = NULL;
12731 }
12732 free_netdev(dev);
12733 pci_release_regions(pdev);
12734 pci_disable_device(pdev);
12735 pci_set_drvdata(pdev, NULL);
12736 }
12737 }
12738
12739 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
12740 {
12741 struct net_device *dev = pci_get_drvdata(pdev);
12742 struct tg3 *tp = netdev_priv(dev);
12743 int err;
12744
12745 /* PCI register 4 needs to be saved whether netif_running() or not.
12746 * MSI address and data need to be saved if using MSI and
12747 * netif_running().
12748 */
12749 pci_save_state(pdev);
12750
12751 if (!netif_running(dev))
12752 return 0;
12753
12754 flush_scheduled_work();
12755 tg3_netif_stop(tp);
12756
12757 del_timer_sync(&tp->timer);
12758
12759 tg3_full_lock(tp, 1);
12760 tg3_disable_ints(tp);
12761 tg3_full_unlock(tp);
12762
12763 netif_device_detach(dev);
12764
12765 tg3_full_lock(tp, 0);
12766 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12767 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
12768 tg3_full_unlock(tp);
12769
12770 err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
12771 if (err) {
12772 tg3_full_lock(tp, 0);
12773
12774 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12775 if (tg3_restart_hw(tp, 1))
12776 goto out;
12777
12778 tp->timer.expires = jiffies + tp->timer_offset;
12779 add_timer(&tp->timer);
12780
12781 netif_device_attach(dev);
12782 tg3_netif_start(tp);
12783
12784 out:
12785 tg3_full_unlock(tp);
12786 }
12787
12788 return err;
12789 }
12790
12791 static int tg3_resume(struct pci_dev *pdev)
12792 {
12793 struct net_device *dev = pci_get_drvdata(pdev);
12794 struct tg3 *tp = netdev_priv(dev);
12795 int err;
12796
12797 pci_restore_state(tp->pdev);
12798
12799 if (!netif_running(dev))
12800 return 0;
12801
12802 err = tg3_set_power_state(tp, PCI_D0);
12803 if (err)
12804 return err;
12805
12806 netif_device_attach(dev);
12807
12808 tg3_full_lock(tp, 0);
12809
12810 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
12811 err = tg3_restart_hw(tp, 1);
12812 if (err)
12813 goto out;
12814
12815 tp->timer.expires = jiffies + tp->timer_offset;
12816 add_timer(&tp->timer);
12817
12818 tg3_netif_start(tp);
12819
12820 out:
12821 tg3_full_unlock(tp);
12822
12823 return err;
12824 }
12825
12826 static struct pci_driver tg3_driver = {
12827 .name = DRV_MODULE_NAME,
12828 .id_table = tg3_pci_tbl,
12829 .probe = tg3_init_one,
12830 .remove = __devexit_p(tg3_remove_one),
12831 .suspend = tg3_suspend,
12832 .resume = tg3_resume
12833 };
12834
12835 static int __init tg3_init(void)
12836 {
12837 return pci_register_driver(&tg3_driver);
12838 }
12839
12840 static void __exit tg3_cleanup(void)
12841 {
12842 pci_unregister_driver(&tg3_driver);
12843 }
12844
12845 module_init(tg3_init);
12846 module_exit(tg3_cleanup);