[BNX2]: Add ring constants.
[deliverable/linux.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76 54
b3448b0b
DV
55#define FW_BUF_SIZE 0x8000
56
b6016b76
MC
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
bbe42974
MC
59#define DRV_MODULE_VERSION "1.6.9"
60#define DRV_MODULE_RELDATE "December 8, 2007"
b6016b76
MC
61
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
e19360f2 67static const char version[] __devinitdata =
b6016b76
MC
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 71MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
72MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board index, used to look up the matching entry in board_info[]. */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
92/* indexed by board_t, above */
/* Human-readable board names, indexed by board_t above. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
107static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
b6016b76
MC
126 { 0, }
127};
128
129static struct flash_spec flash_table[] =
130{
e30372c9
MC
131#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 133 /* Slow EEPROM */
37137709 134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137 "EEPROM - slow"},
37137709
MC
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142 "Entry 0001"},
b6016b76
MC
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
37137709 145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
37137709 151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
37137709
MC
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
176 /* Fast EEPROM */
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1101"},
206 /* Ateml Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
b6016b76
MC
216};
217
e30372c9
MC
218static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
225};
226
b6016b76
MC
227MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
e89bbf10
MC
229static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230{
2f8af120 231 u32 diff;
e89bbf10 232
2f8af120 233 smp_mb();
faac9c4b
MC
234
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
237 */
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
240 diff &= 0xffff;
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
243 }
e89bbf10
MC
244 return (bp->tx_ring_size - diff);
245}
246
b6016b76
MC
247static u32
248bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249{
1b8227c4
MC
250 u32 val;
251
252 spin_lock_bh(&bp->indirect_lock);
b6016b76 253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
256 return val;
b6016b76
MC
257}
258
259static void
260bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
261{
1b8227c4 262 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
263 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
264 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 265 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
266}
267
268static void
269bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270{
271 offset += cid_addr;
1b8227c4 272 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
1b8227c4 290 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
291}
292
293static int
294bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
295{
296 u32 val1;
297 int i, ret;
298
299 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
300 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
302
303 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
304 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
305
306 udelay(40);
307 }
308
309 val1 = (bp->phy_addr << 21) | (reg << 16) |
310 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
311 BNX2_EMAC_MDIO_COMM_START_BUSY;
312 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
313
314 for (i = 0; i < 50; i++) {
315 udelay(10);
316
317 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
318 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
319 udelay(5);
320
321 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
322 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
323
324 break;
325 }
326 }
327
328 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
329 *val = 0x0;
330 ret = -EBUSY;
331 }
332 else {
333 *val = val1;
334 ret = 0;
335 }
336
337 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
338 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
339 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340
341 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
342 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
343
344 udelay(40);
345 }
346
347 return ret;
348}
349
350static int
351bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
352{
353 u32 val1;
354 int i, ret;
355
356 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
357 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
358 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
359
360 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
361 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
362
363 udelay(40);
364 }
365
366 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
367 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
368 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
369 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 370
b6016b76
MC
371 for (i = 0; i < 50; i++) {
372 udelay(10);
373
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
375 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
376 udelay(5);
377 break;
378 }
379 }
380
381 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
382 ret = -EBUSY;
383 else
384 ret = 0;
385
386 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
387 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
388 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
389
390 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
391 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
392
393 udelay(40);
394 }
395
396 return ret;
397}
398
399static void
400bnx2_disable_int(struct bnx2 *bp)
401{
402 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
403 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
404 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
405}
406
407static void
408bnx2_enable_int(struct bnx2 *bp)
409{
1269a8a6
MC
410 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
411 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
412 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
413
b6016b76
MC
414 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
415 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
416
bf5295bb 417 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
418}
419
420static void
421bnx2_disable_int_sync(struct bnx2 *bp)
422{
423 atomic_inc(&bp->intr_sem);
424 bnx2_disable_int(bp);
425 synchronize_irq(bp->pdev->irq);
426}
427
428static void
429bnx2_netif_stop(struct bnx2 *bp)
430{
431 bnx2_disable_int_sync(bp);
432 if (netif_running(bp->dev)) {
bea3348e 433 napi_disable(&bp->napi);
b6016b76
MC
434 netif_tx_disable(bp->dev);
435 bp->dev->trans_start = jiffies; /* prevent tx timeout */
436 }
437}
438
439static void
440bnx2_netif_start(struct bnx2 *bp)
441{
442 if (atomic_dec_and_test(&bp->intr_sem)) {
443 if (netif_running(bp->dev)) {
444 netif_wake_queue(bp->dev);
bea3348e 445 napi_enable(&bp->napi);
b6016b76
MC
446 bnx2_enable_int(bp);
447 }
448 }
449}
450
451static void
452bnx2_free_mem(struct bnx2 *bp)
453{
13daffa2
MC
454 int i;
455
59b47d8a
MC
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459 bp->ctx_blk[i],
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
462 }
463 }
b6016b76 464 if (bp->status_blk) {
0f31f994 465 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
0f31f994 468 bp->stats_blk = NULL;
b6016b76
MC
469 }
470 if (bp->tx_desc_ring) {
e343d55c 471 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
b6016b76
MC
472 bp->tx_desc_ring, bp->tx_desc_mapping);
473 bp->tx_desc_ring = NULL;
474 }
b4558ea9
JJ
475 kfree(bp->tx_buf_ring);
476 bp->tx_buf_ring = NULL;
13daffa2
MC
477 for (i = 0; i < bp->rx_max_ring; i++) {
478 if (bp->rx_desc_ring[i])
e343d55c 479 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
13daffa2
MC
480 bp->rx_desc_ring[i],
481 bp->rx_desc_mapping[i]);
482 bp->rx_desc_ring[i] = NULL;
483 }
484 vfree(bp->rx_buf_ring);
b4558ea9 485 bp->rx_buf_ring = NULL;
b6016b76
MC
486}
487
488static int
489bnx2_alloc_mem(struct bnx2 *bp)
490{
0f31f994 491 int i, status_blk_size;
13daffa2 492
e343d55c 493 bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
b6016b76
MC
494 if (bp->tx_buf_ring == NULL)
495 return -ENOMEM;
496
e343d55c 497 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
b6016b76
MC
498 &bp->tx_desc_mapping);
499 if (bp->tx_desc_ring == NULL)
500 goto alloc_mem_err;
501
e343d55c 502 bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
b6016b76
MC
503 if (bp->rx_buf_ring == NULL)
504 goto alloc_mem_err;
505
e343d55c 506 memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);
13daffa2
MC
507
508 for (i = 0; i < bp->rx_max_ring; i++) {
509 bp->rx_desc_ring[i] =
e343d55c 510 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
13daffa2
MC
511 &bp->rx_desc_mapping[i]);
512 if (bp->rx_desc_ring[i] == NULL)
513 goto alloc_mem_err;
514
515 }
b6016b76 516
0f31f994
MC
517 /* Combine status and statistics blocks into one allocation. */
518 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
519 bp->status_stats_size = status_blk_size +
520 sizeof(struct statistics_block);
521
522 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
523 &bp->status_blk_mapping);
524 if (bp->status_blk == NULL)
525 goto alloc_mem_err;
526
0f31f994 527 memset(bp->status_blk, 0, bp->status_stats_size);
b6016b76 528
0f31f994
MC
529 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
530 status_blk_size);
b6016b76 531
0f31f994 532 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 533
59b47d8a
MC
534 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
535 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
536 if (bp->ctx_pages == 0)
537 bp->ctx_pages = 1;
538 for (i = 0; i < bp->ctx_pages; i++) {
539 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
540 BCM_PAGE_SIZE,
541 &bp->ctx_blk_mapping[i]);
542 if (bp->ctx_blk[i] == NULL)
543 goto alloc_mem_err;
544 }
545 }
b6016b76
MC
546 return 0;
547
548alloc_mem_err:
549 bnx2_free_mem(bp);
550 return -ENOMEM;
551}
552
e3648b3d
MC
553static void
554bnx2_report_fw_link(struct bnx2 *bp)
555{
556 u32 fw_link_status = 0;
557
0d8a6571
MC
558 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
559 return;
560
e3648b3d
MC
561 if (bp->link_up) {
562 u32 bmsr;
563
564 switch (bp->line_speed) {
565 case SPEED_10:
566 if (bp->duplex == DUPLEX_HALF)
567 fw_link_status = BNX2_LINK_STATUS_10HALF;
568 else
569 fw_link_status = BNX2_LINK_STATUS_10FULL;
570 break;
571 case SPEED_100:
572 if (bp->duplex == DUPLEX_HALF)
573 fw_link_status = BNX2_LINK_STATUS_100HALF;
574 else
575 fw_link_status = BNX2_LINK_STATUS_100FULL;
576 break;
577 case SPEED_1000:
578 if (bp->duplex == DUPLEX_HALF)
579 fw_link_status = BNX2_LINK_STATUS_1000HALF;
580 else
581 fw_link_status = BNX2_LINK_STATUS_1000FULL;
582 break;
583 case SPEED_2500:
584 if (bp->duplex == DUPLEX_HALF)
585 fw_link_status = BNX2_LINK_STATUS_2500HALF;
586 else
587 fw_link_status = BNX2_LINK_STATUS_2500FULL;
588 break;
589 }
590
591 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
592
593 if (bp->autoneg) {
594 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
595
ca58c3af
MC
596 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
597 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
598
599 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
600 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
601 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
602 else
603 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
604 }
605 }
606 else
607 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
608
609 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
610}
611
9b1084b8
MC
612static char *
613bnx2_xceiver_str(struct bnx2 *bp)
614{
615 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
616 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
617 "Copper"));
618}
619
b6016b76
MC
620static void
621bnx2_report_link(struct bnx2 *bp)
622{
623 if (bp->link_up) {
624 netif_carrier_on(bp->dev);
9b1084b8
MC
625 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
626 bnx2_xceiver_str(bp));
b6016b76
MC
627
628 printk("%d Mbps ", bp->line_speed);
629
630 if (bp->duplex == DUPLEX_FULL)
631 printk("full duplex");
632 else
633 printk("half duplex");
634
635 if (bp->flow_ctrl) {
636 if (bp->flow_ctrl & FLOW_CTRL_RX) {
637 printk(", receive ");
638 if (bp->flow_ctrl & FLOW_CTRL_TX)
639 printk("& transmit ");
640 }
641 else {
642 printk(", transmit ");
643 }
644 printk("flow control ON");
645 }
646 printk("\n");
647 }
648 else {
649 netif_carrier_off(bp->dev);
9b1084b8
MC
650 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
651 bnx2_xceiver_str(bp));
b6016b76 652 }
e3648b3d
MC
653
654 bnx2_report_fw_link(bp);
b6016b76
MC
655}
656
657static void
658bnx2_resolve_flow_ctrl(struct bnx2 *bp)
659{
660 u32 local_adv, remote_adv;
661
662 bp->flow_ctrl = 0;
6aa20a22 663 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
664 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
665
666 if (bp->duplex == DUPLEX_FULL) {
667 bp->flow_ctrl = bp->req_flow_ctrl;
668 }
669 return;
670 }
671
672 if (bp->duplex != DUPLEX_FULL) {
673 return;
674 }
675
5b0c76ad
MC
676 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
677 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
678 u32 val;
679
680 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
681 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
682 bp->flow_ctrl |= FLOW_CTRL_TX;
683 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
684 bp->flow_ctrl |= FLOW_CTRL_RX;
685 return;
686 }
687
ca58c3af
MC
688 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
689 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
690
691 if (bp->phy_flags & PHY_SERDES_FLAG) {
692 u32 new_local_adv = 0;
693 u32 new_remote_adv = 0;
694
695 if (local_adv & ADVERTISE_1000XPAUSE)
696 new_local_adv |= ADVERTISE_PAUSE_CAP;
697 if (local_adv & ADVERTISE_1000XPSE_ASYM)
698 new_local_adv |= ADVERTISE_PAUSE_ASYM;
699 if (remote_adv & ADVERTISE_1000XPAUSE)
700 new_remote_adv |= ADVERTISE_PAUSE_CAP;
701 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
702 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
703
704 local_adv = new_local_adv;
705 remote_adv = new_remote_adv;
706 }
707
708 /* See Table 28B-3 of 802.3ab-1999 spec. */
709 if (local_adv & ADVERTISE_PAUSE_CAP) {
710 if(local_adv & ADVERTISE_PAUSE_ASYM) {
711 if (remote_adv & ADVERTISE_PAUSE_CAP) {
712 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
713 }
714 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
715 bp->flow_ctrl = FLOW_CTRL_RX;
716 }
717 }
718 else {
719 if (remote_adv & ADVERTISE_PAUSE_CAP) {
720 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
721 }
722 }
723 }
724 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
725 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
726 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
727
728 bp->flow_ctrl = FLOW_CTRL_TX;
729 }
730 }
731}
732
27a005b8
MC
733static int
734bnx2_5709s_linkup(struct bnx2 *bp)
735{
736 u32 val, speed;
737
738 bp->link_up = 1;
739
740 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
741 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
742 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
743
744 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
745 bp->line_speed = bp->req_line_speed;
746 bp->duplex = bp->req_duplex;
747 return 0;
748 }
749 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
750 switch (speed) {
751 case MII_BNX2_GP_TOP_AN_SPEED_10:
752 bp->line_speed = SPEED_10;
753 break;
754 case MII_BNX2_GP_TOP_AN_SPEED_100:
755 bp->line_speed = SPEED_100;
756 break;
757 case MII_BNX2_GP_TOP_AN_SPEED_1G:
758 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
759 bp->line_speed = SPEED_1000;
760 break;
761 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
762 bp->line_speed = SPEED_2500;
763 break;
764 }
765 if (val & MII_BNX2_GP_TOP_AN_FD)
766 bp->duplex = DUPLEX_FULL;
767 else
768 bp->duplex = DUPLEX_HALF;
769 return 0;
770}
771
b6016b76 772static int
5b0c76ad
MC
773bnx2_5708s_linkup(struct bnx2 *bp)
774{
775 u32 val;
776
777 bp->link_up = 1;
778 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
779 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
780 case BCM5708S_1000X_STAT1_SPEED_10:
781 bp->line_speed = SPEED_10;
782 break;
783 case BCM5708S_1000X_STAT1_SPEED_100:
784 bp->line_speed = SPEED_100;
785 break;
786 case BCM5708S_1000X_STAT1_SPEED_1G:
787 bp->line_speed = SPEED_1000;
788 break;
789 case BCM5708S_1000X_STAT1_SPEED_2G5:
790 bp->line_speed = SPEED_2500;
791 break;
792 }
793 if (val & BCM5708S_1000X_STAT1_FD)
794 bp->duplex = DUPLEX_FULL;
795 else
796 bp->duplex = DUPLEX_HALF;
797
798 return 0;
799}
800
801static int
802bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
803{
804 u32 bmcr, local_adv, remote_adv, common;
805
806 bp->link_up = 1;
807 bp->line_speed = SPEED_1000;
808
ca58c3af 809 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
810 if (bmcr & BMCR_FULLDPLX) {
811 bp->duplex = DUPLEX_FULL;
812 }
813 else {
814 bp->duplex = DUPLEX_HALF;
815 }
816
817 if (!(bmcr & BMCR_ANENABLE)) {
818 return 0;
819 }
820
ca58c3af
MC
821 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
822 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
823
824 common = local_adv & remote_adv;
825 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
826
827 if (common & ADVERTISE_1000XFULL) {
828 bp->duplex = DUPLEX_FULL;
829 }
830 else {
831 bp->duplex = DUPLEX_HALF;
832 }
833 }
834
835 return 0;
836}
837
838static int
839bnx2_copper_linkup(struct bnx2 *bp)
840{
841 u32 bmcr;
842
ca58c3af 843 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
844 if (bmcr & BMCR_ANENABLE) {
845 u32 local_adv, remote_adv, common;
846
847 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
848 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
849
850 common = local_adv & (remote_adv >> 2);
851 if (common & ADVERTISE_1000FULL) {
852 bp->line_speed = SPEED_1000;
853 bp->duplex = DUPLEX_FULL;
854 }
855 else if (common & ADVERTISE_1000HALF) {
856 bp->line_speed = SPEED_1000;
857 bp->duplex = DUPLEX_HALF;
858 }
859 else {
ca58c3af
MC
860 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
861 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
862
863 common = local_adv & remote_adv;
864 if (common & ADVERTISE_100FULL) {
865 bp->line_speed = SPEED_100;
866 bp->duplex = DUPLEX_FULL;
867 }
868 else if (common & ADVERTISE_100HALF) {
869 bp->line_speed = SPEED_100;
870 bp->duplex = DUPLEX_HALF;
871 }
872 else if (common & ADVERTISE_10FULL) {
873 bp->line_speed = SPEED_10;
874 bp->duplex = DUPLEX_FULL;
875 }
876 else if (common & ADVERTISE_10HALF) {
877 bp->line_speed = SPEED_10;
878 bp->duplex = DUPLEX_HALF;
879 }
880 else {
881 bp->line_speed = 0;
882 bp->link_up = 0;
883 }
884 }
885 }
886 else {
887 if (bmcr & BMCR_SPEED100) {
888 bp->line_speed = SPEED_100;
889 }
890 else {
891 bp->line_speed = SPEED_10;
892 }
893 if (bmcr & BMCR_FULLDPLX) {
894 bp->duplex = DUPLEX_FULL;
895 }
896 else {
897 bp->duplex = DUPLEX_HALF;
898 }
899 }
900
901 return 0;
902}
903
904static int
905bnx2_set_mac_link(struct bnx2 *bp)
906{
907 u32 val;
908
909 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
910 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
911 (bp->duplex == DUPLEX_HALF)) {
912 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
913 }
914
915 /* Configure the EMAC mode register. */
916 val = REG_RD(bp, BNX2_EMAC_MODE);
917
918 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 919 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 920 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
921
922 if (bp->link_up) {
5b0c76ad
MC
923 switch (bp->line_speed) {
924 case SPEED_10:
59b47d8a
MC
925 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
926 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
927 break;
928 }
929 /* fall through */
930 case SPEED_100:
931 val |= BNX2_EMAC_MODE_PORT_MII;
932 break;
933 case SPEED_2500:
59b47d8a 934 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
935 /* fall through */
936 case SPEED_1000:
937 val |= BNX2_EMAC_MODE_PORT_GMII;
938 break;
939 }
b6016b76
MC
940 }
941 else {
942 val |= BNX2_EMAC_MODE_PORT_GMII;
943 }
944
945 /* Set the MAC to operate in the appropriate duplex mode. */
946 if (bp->duplex == DUPLEX_HALF)
947 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
948 REG_WR(bp, BNX2_EMAC_MODE, val);
949
950 /* Enable/disable rx PAUSE. */
951 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
952
953 if (bp->flow_ctrl & FLOW_CTRL_RX)
954 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
955 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
956
957 /* Enable/disable tx PAUSE. */
958 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
959 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
960
961 if (bp->flow_ctrl & FLOW_CTRL_TX)
962 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
963 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
964
965 /* Acknowledge the interrupt. */
966 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
967
968 return 0;
969}
970
27a005b8
MC
971static void
972bnx2_enable_bmsr1(struct bnx2 *bp)
973{
974 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
975 (CHIP_NUM(bp) == CHIP_NUM_5709))
976 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
977 MII_BNX2_BLK_ADDR_GP_STATUS);
978}
979
980static void
981bnx2_disable_bmsr1(struct bnx2 *bp)
982{
983 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
984 (CHIP_NUM(bp) == CHIP_NUM_5709))
985 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
986 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
987}
988
605a9e20
MC
989static int
990bnx2_test_and_enable_2g5(struct bnx2 *bp)
991{
992 u32 up1;
993 int ret = 1;
994
995 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
996 return 0;
997
998 if (bp->autoneg & AUTONEG_SPEED)
999 bp->advertising |= ADVERTISED_2500baseX_Full;
1000
27a005b8
MC
1001 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1002 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1003
605a9e20
MC
1004 bnx2_read_phy(bp, bp->mii_up1, &up1);
1005 if (!(up1 & BCM5708S_UP1_2G5)) {
1006 up1 |= BCM5708S_UP1_2G5;
1007 bnx2_write_phy(bp, bp->mii_up1, up1);
1008 ret = 0;
1009 }
1010
27a005b8
MC
1011 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1012 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1013 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1014
605a9e20
MC
1015 return ret;
1016}
1017
1018static int
1019bnx2_test_and_disable_2g5(struct bnx2 *bp)
1020{
1021 u32 up1;
1022 int ret = 0;
1023
1024 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1025 return 0;
1026
27a005b8
MC
1027 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1028 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1029
605a9e20
MC
1030 bnx2_read_phy(bp, bp->mii_up1, &up1);
1031 if (up1 & BCM5708S_UP1_2G5) {
1032 up1 &= ~BCM5708S_UP1_2G5;
1033 bnx2_write_phy(bp, bp->mii_up1, up1);
1034 ret = 1;
1035 }
1036
27a005b8
MC
1037 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1038 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1039 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1040
605a9e20
MC
1041 return ret;
1042}
1043
1044static void
1045bnx2_enable_forced_2g5(struct bnx2 *bp)
1046{
1047 u32 bmcr;
1048
1049 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1050 return;
1051
27a005b8
MC
1052 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1053 u32 val;
1054
1055 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1056 MII_BNX2_BLK_ADDR_SERDES_DIG);
1057 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1058 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1059 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1060 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1061
1062 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1063 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1064 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1065
1066 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1067 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1068 bmcr |= BCM5708S_BMCR_FORCE_2500;
1069 }
1070
1071 if (bp->autoneg & AUTONEG_SPEED) {
1072 bmcr &= ~BMCR_ANENABLE;
1073 if (bp->req_duplex == DUPLEX_FULL)
1074 bmcr |= BMCR_FULLDPLX;
1075 }
1076 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1077}
1078
1079static void
1080bnx2_disable_forced_2g5(struct bnx2 *bp)
1081{
1082 u32 bmcr;
1083
1084 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1085 return;
1086
27a005b8
MC
1087 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1088 u32 val;
1089
1090 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1091 MII_BNX2_BLK_ADDR_SERDES_DIG);
1092 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1093 val &= ~MII_BNX2_SD_MISC1_FORCE;
1094 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1095
1096 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1097 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1098 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1099
1100 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1101 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1102 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1103 }
1104
1105 if (bp->autoneg & AUTONEG_SPEED)
1106 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1107 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1108}
1109
b6016b76
MC
1110static int
1111bnx2_set_link(struct bnx2 *bp)
1112{
1113 u32 bmsr;
1114 u8 link_up;
1115
80be4434 1116 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
1117 bp->link_up = 1;
1118 return 0;
1119 }
1120
0d8a6571
MC
1121 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1122 return 0;
1123
b6016b76
MC
1124 link_up = bp->link_up;
1125
27a005b8
MC
1126 bnx2_enable_bmsr1(bp);
1127 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1128 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1129 bnx2_disable_bmsr1(bp);
b6016b76
MC
1130
1131 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1132 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1133 u32 val;
1134
1135 val = REG_RD(bp, BNX2_EMAC_STATUS);
1136 if (val & BNX2_EMAC_STATUS_LINK)
1137 bmsr |= BMSR_LSTATUS;
1138 else
1139 bmsr &= ~BMSR_LSTATUS;
1140 }
1141
1142 if (bmsr & BMSR_LSTATUS) {
1143 bp->link_up = 1;
1144
1145 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1146 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1147 bnx2_5706s_linkup(bp);
1148 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1149 bnx2_5708s_linkup(bp);
27a005b8
MC
1150 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1151 bnx2_5709s_linkup(bp);
b6016b76
MC
1152 }
1153 else {
1154 bnx2_copper_linkup(bp);
1155 }
1156 bnx2_resolve_flow_ctrl(bp);
1157 }
1158 else {
1159 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
605a9e20
MC
1160 (bp->autoneg & AUTONEG_SPEED))
1161 bnx2_disable_forced_2g5(bp);
b6016b76 1162
b6016b76
MC
1163 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1164 bp->link_up = 0;
1165 }
1166
1167 if (bp->link_up != link_up) {
1168 bnx2_report_link(bp);
1169 }
1170
1171 bnx2_set_mac_link(bp);
1172
1173 return 0;
1174}
1175
1176static int
1177bnx2_reset_phy(struct bnx2 *bp)
1178{
1179 int i;
1180 u32 reg;
1181
ca58c3af 1182 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1183
1184#define PHY_RESET_MAX_WAIT 100
1185 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1186 udelay(10);
1187
ca58c3af 1188 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1189 if (!(reg & BMCR_RESET)) {
1190 udelay(20);
1191 break;
1192 }
1193 }
1194 if (i == PHY_RESET_MAX_WAIT) {
1195 return -EBUSY;
1196 }
1197 return 0;
1198}
1199
1200static u32
1201bnx2_phy_get_pause_adv(struct bnx2 *bp)
1202{
1203 u32 adv = 0;
1204
1205 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1206 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1207
1208 if (bp->phy_flags & PHY_SERDES_FLAG) {
1209 adv = ADVERTISE_1000XPAUSE;
1210 }
1211 else {
1212 adv = ADVERTISE_PAUSE_CAP;
1213 }
1214 }
1215 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1216 if (bp->phy_flags & PHY_SERDES_FLAG) {
1217 adv = ADVERTISE_1000XPSE_ASYM;
1218 }
1219 else {
1220 adv = ADVERTISE_PAUSE_ASYM;
1221 }
1222 }
1223 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1224 if (bp->phy_flags & PHY_SERDES_FLAG) {
1225 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1226 }
1227 else {
1228 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1229 }
1230 }
1231 return adv;
1232}
1233
0d8a6571
MC
1234static int bnx2_fw_sync(struct bnx2 *, u32, int);
1235
b6016b76 1236static int
0d8a6571
MC
1237bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1238{
1239 u32 speed_arg = 0, pause_adv;
1240
1241 pause_adv = bnx2_phy_get_pause_adv(bp);
1242
1243 if (bp->autoneg & AUTONEG_SPEED) {
1244 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1245 if (bp->advertising & ADVERTISED_10baseT_Half)
1246 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1247 if (bp->advertising & ADVERTISED_10baseT_Full)
1248 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1249 if (bp->advertising & ADVERTISED_100baseT_Half)
1250 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1251 if (bp->advertising & ADVERTISED_100baseT_Full)
1252 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1253 if (bp->advertising & ADVERTISED_1000baseT_Full)
1254 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1255 if (bp->advertising & ADVERTISED_2500baseX_Full)
1256 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1257 } else {
1258 if (bp->req_line_speed == SPEED_2500)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1260 else if (bp->req_line_speed == SPEED_1000)
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1262 else if (bp->req_line_speed == SPEED_100) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1265 else
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1267 } else if (bp->req_line_speed == SPEED_10) {
1268 if (bp->req_duplex == DUPLEX_FULL)
1269 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1270 else
1271 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1272 }
1273 }
1274
1275 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1276 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1277 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1278 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1279
1280 if (port == PORT_TP)
1281 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1282 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1283
1284 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1285
1286 spin_unlock_bh(&bp->phy_lock);
1287 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1288 spin_lock_bh(&bp->phy_lock);
1289
1290 return 0;
1291}
1292
1293static int
1294bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
b6016b76 1295{
605a9e20 1296 u32 adv, bmcr;
b6016b76
MC
1297 u32 new_adv = 0;
1298
0d8a6571
MC
1299 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1300 return (bnx2_setup_remote_phy(bp, port));
1301
b6016b76
MC
1302 if (!(bp->autoneg & AUTONEG_SPEED)) {
1303 u32 new_bmcr;
5b0c76ad
MC
1304 int force_link_down = 0;
1305
605a9e20
MC
1306 if (bp->req_line_speed == SPEED_2500) {
1307 if (!bnx2_test_and_enable_2g5(bp))
1308 force_link_down = 1;
1309 } else if (bp->req_line_speed == SPEED_1000) {
1310 if (bnx2_test_and_disable_2g5(bp))
1311 force_link_down = 1;
1312 }
ca58c3af 1313 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1314 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1315
ca58c3af 1316 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1317 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1318 new_bmcr |= BMCR_SPEED1000;
605a9e20 1319
27a005b8
MC
1320 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1321 if (bp->req_line_speed == SPEED_2500)
1322 bnx2_enable_forced_2g5(bp);
1323 else if (bp->req_line_speed == SPEED_1000) {
1324 bnx2_disable_forced_2g5(bp);
1325 new_bmcr &= ~0x2000;
1326 }
1327
1328 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1329 if (bp->req_line_speed == SPEED_2500)
1330 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1331 else
1332 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1333 }
1334
b6016b76 1335 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1336 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1337 new_bmcr |= BMCR_FULLDPLX;
1338 }
1339 else {
5b0c76ad 1340 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1341 new_bmcr &= ~BMCR_FULLDPLX;
1342 }
5b0c76ad 1343 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1344 /* Force a link down visible on the other side */
1345 if (bp->link_up) {
ca58c3af 1346 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1347 ~(ADVERTISE_1000XFULL |
1348 ADVERTISE_1000XHALF));
ca58c3af 1349 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1350 BMCR_ANRESTART | BMCR_ANENABLE);
1351
1352 bp->link_up = 0;
1353 netif_carrier_off(bp->dev);
ca58c3af 1354 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1355 bnx2_report_link(bp);
b6016b76 1356 }
ca58c3af
MC
1357 bnx2_write_phy(bp, bp->mii_adv, adv);
1358 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1359 } else {
1360 bnx2_resolve_flow_ctrl(bp);
1361 bnx2_set_mac_link(bp);
b6016b76
MC
1362 }
1363 return 0;
1364 }
1365
605a9e20 1366 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1367
b6016b76
MC
1368 if (bp->advertising & ADVERTISED_1000baseT_Full)
1369 new_adv |= ADVERTISE_1000XFULL;
1370
1371 new_adv |= bnx2_phy_get_pause_adv(bp);
1372
ca58c3af
MC
1373 bnx2_read_phy(bp, bp->mii_adv, &adv);
1374 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1375
1376 bp->serdes_an_pending = 0;
1377 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1378 /* Force a link down visible on the other side */
1379 if (bp->link_up) {
ca58c3af 1380 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1381 spin_unlock_bh(&bp->phy_lock);
1382 msleep(20);
1383 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1384 }
1385
ca58c3af
MC
1386 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1387 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1388 BMCR_ANENABLE);
f8dd064e
MC
1389 /* Speed up link-up time when the link partner
1390 * does not autonegotiate which is very common
1391 * in blade servers. Some blade servers use
1392 * IPMI for kerboard input and it's important
1393 * to minimize link disruptions. Autoneg. involves
1394 * exchanging base pages plus 3 next pages and
1395 * normally completes in about 120 msec.
1396 */
1397 bp->current_interval = SERDES_AN_TIMEOUT;
1398 bp->serdes_an_pending = 1;
1399 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1400 } else {
1401 bnx2_resolve_flow_ctrl(bp);
1402 bnx2_set_mac_link(bp);
b6016b76
MC
1403 }
1404
1405 return 0;
1406}
1407
/* All fibre speeds this device can advertise.  Evaluates `bp' from the
 * enclosing scope.  The expansion is now fully parenthesized: previously
 * the bare conditional expression meant that at the use site
 * `ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg' the `|' (which binds
 * tighter than `?:') was absorbed into the false branch only, silently
 * dropping ADVERTISED_Autoneg for 2.5G-capable ports.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full))

/* All copper speeds in ethtool ADVERTISED_* encoding. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bit masks for 10/100 and 1000 speeds. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1422
0d8a6571
MC
1423static void
1424bnx2_set_default_remote_link(struct bnx2 *bp)
1425{
1426 u32 link;
1427
1428 if (bp->phy_port == PORT_TP)
1429 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1430 else
1431 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1432
1433 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1434 bp->req_line_speed = 0;
1435 bp->autoneg |= AUTONEG_SPEED;
1436 bp->advertising = ADVERTISED_Autoneg;
1437 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1438 bp->advertising |= ADVERTISED_10baseT_Half;
1439 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1440 bp->advertising |= ADVERTISED_10baseT_Full;
1441 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1442 bp->advertising |= ADVERTISED_100baseT_Half;
1443 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1444 bp->advertising |= ADVERTISED_100baseT_Full;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1446 bp->advertising |= ADVERTISED_1000baseT_Full;
1447 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1448 bp->advertising |= ADVERTISED_2500baseX_Full;
1449 } else {
1450 bp->autoneg = 0;
1451 bp->advertising = 0;
1452 bp->req_duplex = DUPLEX_FULL;
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1454 bp->req_line_speed = SPEED_10;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1456 bp->req_duplex = DUPLEX_HALF;
1457 }
1458 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1459 bp->req_line_speed = SPEED_100;
1460 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1461 bp->req_duplex = DUPLEX_HALF;
1462 }
1463 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1464 bp->req_line_speed = SPEED_1000;
1465 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1466 bp->req_line_speed = SPEED_2500;
1467 }
1468}
1469
deaf391b
MC
1470static void
1471bnx2_set_default_link(struct bnx2 *bp)
1472{
0d8a6571
MC
1473 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1474 return bnx2_set_default_remote_link(bp);
1475
deaf391b
MC
1476 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1477 bp->req_line_speed = 0;
1478 if (bp->phy_flags & PHY_SERDES_FLAG) {
1479 u32 reg;
1480
1481 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1482
1483 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1484 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1485 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1486 bp->autoneg = 0;
1487 bp->req_line_speed = bp->line_speed = SPEED_1000;
1488 bp->req_duplex = DUPLEX_FULL;
1489 }
1490 } else
1491 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1492}
1493
df149d70
MC
1494static void
1495bnx2_send_heart_beat(struct bnx2 *bp)
1496{
1497 u32 msg;
1498 u32 addr;
1499
1500 spin_lock(&bp->indirect_lock);
1501 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1502 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1503 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1504 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1505 spin_unlock(&bp->indirect_lock);
1506}
1507
0d8a6571
MC
1508static void
1509bnx2_remote_phy_event(struct bnx2 *bp)
1510{
1511 u32 msg;
1512 u8 link_up = bp->link_up;
1513 u8 old_port;
1514
1515 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1516
df149d70
MC
1517 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1518 bnx2_send_heart_beat(bp);
1519
1520 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1521
0d8a6571
MC
1522 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1523 bp->link_up = 0;
1524 else {
1525 u32 speed;
1526
1527 bp->link_up = 1;
1528 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1529 bp->duplex = DUPLEX_FULL;
1530 switch (speed) {
1531 case BNX2_LINK_STATUS_10HALF:
1532 bp->duplex = DUPLEX_HALF;
1533 case BNX2_LINK_STATUS_10FULL:
1534 bp->line_speed = SPEED_10;
1535 break;
1536 case BNX2_LINK_STATUS_100HALF:
1537 bp->duplex = DUPLEX_HALF;
1538 case BNX2_LINK_STATUS_100BASE_T4:
1539 case BNX2_LINK_STATUS_100FULL:
1540 bp->line_speed = SPEED_100;
1541 break;
1542 case BNX2_LINK_STATUS_1000HALF:
1543 bp->duplex = DUPLEX_HALF;
1544 case BNX2_LINK_STATUS_1000FULL:
1545 bp->line_speed = SPEED_1000;
1546 break;
1547 case BNX2_LINK_STATUS_2500HALF:
1548 bp->duplex = DUPLEX_HALF;
1549 case BNX2_LINK_STATUS_2500FULL:
1550 bp->line_speed = SPEED_2500;
1551 break;
1552 default:
1553 bp->line_speed = 0;
1554 break;
1555 }
1556
1557 spin_lock(&bp->phy_lock);
1558 bp->flow_ctrl = 0;
1559 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1560 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1561 if (bp->duplex == DUPLEX_FULL)
1562 bp->flow_ctrl = bp->req_flow_ctrl;
1563 } else {
1564 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1565 bp->flow_ctrl |= FLOW_CTRL_TX;
1566 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1567 bp->flow_ctrl |= FLOW_CTRL_RX;
1568 }
1569
1570 old_port = bp->phy_port;
1571 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1572 bp->phy_port = PORT_FIBRE;
1573 else
1574 bp->phy_port = PORT_TP;
1575
1576 if (old_port != bp->phy_port)
1577 bnx2_set_default_link(bp);
1578
1579 spin_unlock(&bp->phy_lock);
1580 }
1581 if (bp->link_up != link_up)
1582 bnx2_report_link(bp);
1583
1584 bnx2_set_mac_link(bp);
1585}
1586
1587static int
1588bnx2_set_remote_link(struct bnx2 *bp)
1589{
1590 u32 evt_code;
1591
1592 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1593 switch (evt_code) {
1594 case BNX2_FW_EVT_CODE_LINK_EVENT:
1595 bnx2_remote_phy_event(bp);
1596 break;
1597 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1598 default:
df149d70 1599 bnx2_send_heart_beat(bp);
0d8a6571
MC
1600 break;
1601 }
1602 return 0;
1603}
1604
b6016b76
MC
1605static int
1606bnx2_setup_copper_phy(struct bnx2 *bp)
1607{
1608 u32 bmcr;
1609 u32 new_bmcr;
1610
ca58c3af 1611 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1612
1613 if (bp->autoneg & AUTONEG_SPEED) {
1614 u32 adv_reg, adv1000_reg;
1615 u32 new_adv_reg = 0;
1616 u32 new_adv1000_reg = 0;
1617
ca58c3af 1618 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
b6016b76
MC
1619 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1620 ADVERTISE_PAUSE_ASYM);
1621
1622 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1623 adv1000_reg &= PHY_ALL_1000_SPEED;
1624
1625 if (bp->advertising & ADVERTISED_10baseT_Half)
1626 new_adv_reg |= ADVERTISE_10HALF;
1627 if (bp->advertising & ADVERTISED_10baseT_Full)
1628 new_adv_reg |= ADVERTISE_10FULL;
1629 if (bp->advertising & ADVERTISED_100baseT_Half)
1630 new_adv_reg |= ADVERTISE_100HALF;
1631 if (bp->advertising & ADVERTISED_100baseT_Full)
1632 new_adv_reg |= ADVERTISE_100FULL;
1633 if (bp->advertising & ADVERTISED_1000baseT_Full)
1634 new_adv1000_reg |= ADVERTISE_1000FULL;
6aa20a22 1635
b6016b76
MC
1636 new_adv_reg |= ADVERTISE_CSMA;
1637
1638 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1639
1640 if ((adv1000_reg != new_adv1000_reg) ||
1641 (adv_reg != new_adv_reg) ||
1642 ((bmcr & BMCR_ANENABLE) == 0)) {
1643
ca58c3af 1644 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
b6016b76 1645 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
ca58c3af 1646 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
1647 BMCR_ANENABLE);
1648 }
1649 else if (bp->link_up) {
1650 /* Flow ctrl may have changed from auto to forced */
1651 /* or vice-versa. */
1652
1653 bnx2_resolve_flow_ctrl(bp);
1654 bnx2_set_mac_link(bp);
1655 }
1656 return 0;
1657 }
1658
1659 new_bmcr = 0;
1660 if (bp->req_line_speed == SPEED_100) {
1661 new_bmcr |= BMCR_SPEED100;
1662 }
1663 if (bp->req_duplex == DUPLEX_FULL) {
1664 new_bmcr |= BMCR_FULLDPLX;
1665 }
1666 if (new_bmcr != bmcr) {
1667 u32 bmsr;
b6016b76 1668
ca58c3af
MC
1669 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1670 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 1671
b6016b76
MC
1672 if (bmsr & BMSR_LSTATUS) {
1673 /* Force link down */
ca58c3af 1674 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
1675 spin_unlock_bh(&bp->phy_lock);
1676 msleep(50);
1677 spin_lock_bh(&bp->phy_lock);
1678
ca58c3af
MC
1679 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1680 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
1681 }
1682
ca58c3af 1683 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
1684
1685 /* Normally, the new speed is setup after the link has
1686 * gone down and up again. In some cases, link will not go
1687 * down so we need to set up the new speed here.
1688 */
1689 if (bmsr & BMSR_LSTATUS) {
1690 bp->line_speed = bp->req_line_speed;
1691 bp->duplex = bp->req_duplex;
1692 bnx2_resolve_flow_ctrl(bp);
1693 bnx2_set_mac_link(bp);
1694 }
27a005b8
MC
1695 } else {
1696 bnx2_resolve_flow_ctrl(bp);
1697 bnx2_set_mac_link(bp);
b6016b76
MC
1698 }
1699 return 0;
1700}
1701
1702static int
0d8a6571 1703bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1704{
1705 if (bp->loopback == MAC_LOOPBACK)
1706 return 0;
1707
1708 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1709 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1710 }
1711 else {
1712 return (bnx2_setup_copper_phy(bp));
1713 }
1714}
1715
27a005b8
MC
1716static int
1717bnx2_init_5709s_phy(struct bnx2 *bp)
1718{
1719 u32 val;
1720
1721 bp->mii_bmcr = MII_BMCR + 0x10;
1722 bp->mii_bmsr = MII_BMSR + 0x10;
1723 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1724 bp->mii_adv = MII_ADVERTISE + 0x10;
1725 bp->mii_lpa = MII_LPA + 0x10;
1726 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1727
1728 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1729 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1730
1731 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1732 bnx2_reset_phy(bp);
1733
1734 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1735
1736 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1737 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1738 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1739 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1740
1741 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1742 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1743 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1744 val |= BCM5708S_UP1_2G5;
1745 else
1746 val &= ~BCM5708S_UP1_2G5;
1747 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1748
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1750 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1751 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1752 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1753
1754 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1755
1756 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1757 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1758 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1759
1760 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1761
1762 return 0;
1763}
1764
b6016b76 1765static int
5b0c76ad
MC
1766bnx2_init_5708s_phy(struct bnx2 *bp)
1767{
1768 u32 val;
1769
27a005b8
MC
1770 bnx2_reset_phy(bp);
1771
1772 bp->mii_up1 = BCM5708S_UP1;
1773
5b0c76ad
MC
1774 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1775 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1776 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1777
1778 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1779 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1780 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1781
1782 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1783 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1784 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1785
1786 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1787 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1788 val |= BCM5708S_UP1_2G5;
1789 bnx2_write_phy(bp, BCM5708S_UP1, val);
1790 }
1791
1792 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
1793 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1794 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
1795 /* increase tx signal amplitude */
1796 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1797 BCM5708S_BLK_ADDR_TX_MISC);
1798 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1799 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1800 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1801 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1802 }
1803
e3648b3d 1804 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
1805 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1806
1807 if (val) {
1808 u32 is_backplane;
1809
e3648b3d 1810 is_backplane = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
1811 BNX2_SHARED_HW_CFG_CONFIG);
1812 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1813 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1814 BCM5708S_BLK_ADDR_TX_MISC);
1815 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1816 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1817 BCM5708S_BLK_ADDR_DIG);
1818 }
1819 }
1820 return 0;
1821}
1822
1823static int
1824bnx2_init_5706s_phy(struct bnx2 *bp)
b6016b76 1825{
27a005b8
MC
1826 bnx2_reset_phy(bp);
1827
b6016b76
MC
1828 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1829
59b47d8a
MC
1830 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1831 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
1832
1833 if (bp->dev->mtu > 1500) {
1834 u32 val;
1835
1836 /* Set extended packet length bit */
1837 bnx2_write_phy(bp, 0x18, 0x7);
1838 bnx2_read_phy(bp, 0x18, &val);
1839 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1840
1841 bnx2_write_phy(bp, 0x1c, 0x6c00);
1842 bnx2_read_phy(bp, 0x1c, &val);
1843 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1844 }
1845 else {
1846 u32 val;
1847
1848 bnx2_write_phy(bp, 0x18, 0x7);
1849 bnx2_read_phy(bp, 0x18, &val);
1850 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1851
1852 bnx2_write_phy(bp, 0x1c, 0x6c00);
1853 bnx2_read_phy(bp, 0x1c, &val);
1854 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1855 }
1856
1857 return 0;
1858}
1859
1860static int
1861bnx2_init_copper_phy(struct bnx2 *bp)
1862{
5b0c76ad
MC
1863 u32 val;
1864
27a005b8
MC
1865 bnx2_reset_phy(bp);
1866
b6016b76
MC
1867 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1868 bnx2_write_phy(bp, 0x18, 0x0c00);
1869 bnx2_write_phy(bp, 0x17, 0x000a);
1870 bnx2_write_phy(bp, 0x15, 0x310b);
1871 bnx2_write_phy(bp, 0x17, 0x201f);
1872 bnx2_write_phy(bp, 0x15, 0x9506);
1873 bnx2_write_phy(bp, 0x17, 0x401f);
1874 bnx2_write_phy(bp, 0x15, 0x14e2);
1875 bnx2_write_phy(bp, 0x18, 0x0400);
1876 }
1877
b659f44e
MC
1878 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1879 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1880 MII_BNX2_DSP_EXPAND_REG | 0x8);
1881 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1882 val &= ~(1 << 8);
1883 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1884 }
1885
b6016b76 1886 if (bp->dev->mtu > 1500) {
b6016b76
MC
1887 /* Set extended packet length bit */
1888 bnx2_write_phy(bp, 0x18, 0x7);
1889 bnx2_read_phy(bp, 0x18, &val);
1890 bnx2_write_phy(bp, 0x18, val | 0x4000);
1891
1892 bnx2_read_phy(bp, 0x10, &val);
1893 bnx2_write_phy(bp, 0x10, val | 0x1);
1894 }
1895 else {
b6016b76
MC
1896 bnx2_write_phy(bp, 0x18, 0x7);
1897 bnx2_read_phy(bp, 0x18, &val);
1898 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1899
1900 bnx2_read_phy(bp, 0x10, &val);
1901 bnx2_write_phy(bp, 0x10, val & ~0x1);
1902 }
1903
5b0c76ad
MC
1904 /* ethernet@wirespeed */
1905 bnx2_write_phy(bp, 0x18, 0x7007);
1906 bnx2_read_phy(bp, 0x18, &val);
1907 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
1908 return 0;
1909}
1910
1911
1912static int
1913bnx2_init_phy(struct bnx2 *bp)
1914{
1915 u32 val;
1916 int rc = 0;
1917
1918 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1919 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1920
ca58c3af
MC
1921 bp->mii_bmcr = MII_BMCR;
1922 bp->mii_bmsr = MII_BMSR;
27a005b8 1923 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
1924 bp->mii_adv = MII_ADVERTISE;
1925 bp->mii_lpa = MII_LPA;
1926
b6016b76
MC
1927 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1928
0d8a6571
MC
1929 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1930 goto setup_phy;
1931
b6016b76
MC
1932 bnx2_read_phy(bp, MII_PHYSID1, &val);
1933 bp->phy_id = val << 16;
1934 bnx2_read_phy(bp, MII_PHYSID2, &val);
1935 bp->phy_id |= val & 0xffff;
1936
1937 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1938 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1939 rc = bnx2_init_5706s_phy(bp);
1940 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1941 rc = bnx2_init_5708s_phy(bp);
27a005b8
MC
1942 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1943 rc = bnx2_init_5709s_phy(bp);
b6016b76
MC
1944 }
1945 else {
1946 rc = bnx2_init_copper_phy(bp);
1947 }
1948
0d8a6571
MC
1949setup_phy:
1950 if (!rc)
1951 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
1952
1953 return rc;
1954}
1955
1956static int
1957bnx2_set_mac_loopback(struct bnx2 *bp)
1958{
1959 u32 mac_mode;
1960
1961 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1962 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1963 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1964 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1965 bp->link_up = 1;
1966 return 0;
1967}
1968
bc5a0690
MC
1969static int bnx2_test_link(struct bnx2 *);
1970
1971static int
1972bnx2_set_phy_loopback(struct bnx2 *bp)
1973{
1974 u32 mac_mode;
1975 int rc, i;
1976
1977 spin_lock_bh(&bp->phy_lock);
ca58c3af 1978 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
1979 BMCR_SPEED1000);
1980 spin_unlock_bh(&bp->phy_lock);
1981 if (rc)
1982 return rc;
1983
1984 for (i = 0; i < 10; i++) {
1985 if (bnx2_test_link(bp) == 0)
1986 break;
80be4434 1987 msleep(100);
bc5a0690
MC
1988 }
1989
1990 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1992 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 1993 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
1994
1995 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1996 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1997 bp->link_up = 1;
1998 return 0;
1999}
2000
b6016b76 2001static int
b090ae2b 2002bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
b6016b76
MC
2003{
2004 int i;
2005 u32 val;
2006
b6016b76
MC
2007 bp->fw_wr_seq++;
2008 msg_data |= bp->fw_wr_seq;
2009
e3648b3d 2010 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76
MC
2011
2012 /* wait for an acknowledgement. */
b090ae2b
MC
2013 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2014 msleep(10);
b6016b76 2015
e3648b3d 2016 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
b6016b76
MC
2017
2018 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2019 break;
2020 }
b090ae2b
MC
2021 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2022 return 0;
b6016b76
MC
2023
2024 /* If we timed out, inform the firmware that this is the case. */
b090ae2b
MC
2025 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2026 if (!silent)
2027 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2028 "%x\n", msg_data);
b6016b76
MC
2029
2030 msg_data &= ~BNX2_DRV_MSG_CODE;
2031 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2032
e3648b3d 2033 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76 2034
b6016b76
MC
2035 return -EBUSY;
2036 }
2037
b090ae2b
MC
2038 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2039 return -EIO;
2040
b6016b76
MC
2041 return 0;
2042}
2043
59b47d8a
MC
2044static int
2045bnx2_init_5709_context(struct bnx2 *bp)
2046{
2047 int i, ret = 0;
2048 u32 val;
2049
2050 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2051 val |= (BCM_PAGE_BITS - 8) << 16;
2052 REG_WR(bp, BNX2_CTX_COMMAND, val);
641bdcd5
MC
2053 for (i = 0; i < 10; i++) {
2054 val = REG_RD(bp, BNX2_CTX_COMMAND);
2055 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2056 break;
2057 udelay(2);
2058 }
2059 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2060 return -EBUSY;
2061
59b47d8a
MC
2062 for (i = 0; i < bp->ctx_pages; i++) {
2063 int j;
2064
2065 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2066 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2067 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2068 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2069 (u64) bp->ctx_blk_mapping[i] >> 32);
2070 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2071 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2072 for (j = 0; j < 10; j++) {
2073
2074 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2075 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2076 break;
2077 udelay(5);
2078 }
2079 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2080 ret = -EBUSY;
2081 break;
2082 }
2083 }
2084 return ret;
2085}
2086
b6016b76
MC
2087static void
2088bnx2_init_context(struct bnx2 *bp)
2089{
2090 u32 vcid;
2091
2092 vcid = 96;
2093 while (vcid) {
2094 u32 vcid_addr, pcid_addr, offset;
7947b20e 2095 int i;
b6016b76
MC
2096
2097 vcid--;
2098
2099 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2100 u32 new_vcid;
2101
2102 vcid_addr = GET_PCID_ADDR(vcid);
2103 if (vcid & 0x8) {
2104 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2105 }
2106 else {
2107 new_vcid = vcid;
2108 }
2109 pcid_addr = GET_PCID_ADDR(new_vcid);
2110 }
2111 else {
2112 vcid_addr = GET_CID_ADDR(vcid);
2113 pcid_addr = vcid_addr;
2114 }
2115
7947b20e
MC
2116 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2117 vcid_addr += (i << PHY_CTX_SHIFT);
2118 pcid_addr += (i << PHY_CTX_SHIFT);
b6016b76 2119
7947b20e
MC
2120 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2121 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
b6016b76 2122
7947b20e
MC
2123 /* Zero out the context. */
2124 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2125 CTX_WR(bp, 0x00, offset, 0);
2126
2127 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2128 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2129 }
b6016b76
MC
2130 }
2131}
2132
2133static int
2134bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2135{
2136 u16 *good_mbuf;
2137 u32 good_mbuf_cnt;
2138 u32 val;
2139
2140 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2141 if (good_mbuf == NULL) {
2142 printk(KERN_ERR PFX "Failed to allocate memory in "
2143 "bnx2_alloc_bad_rbuf\n");
2144 return -ENOMEM;
2145 }
2146
2147 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2148 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2149
2150 good_mbuf_cnt = 0;
2151
2152 /* Allocate a bunch of mbufs and save the good ones in an array. */
2153 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2154 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2155 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
2156
2157 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
2158
2159 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2160
2161 /* The addresses with Bit 9 set are bad memory blocks. */
2162 if (!(val & (1 << 9))) {
2163 good_mbuf[good_mbuf_cnt] = (u16) val;
2164 good_mbuf_cnt++;
2165 }
2166
2167 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
2168 }
2169
2170 /* Free the good ones back to the mbuf pool thus discarding
2171 * all the bad ones. */
2172 while (good_mbuf_cnt) {
2173 good_mbuf_cnt--;
2174
2175 val = good_mbuf[good_mbuf_cnt];
2176 val = (val << 9) | val | 1;
2177
2178 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
2179 }
2180 kfree(good_mbuf);
2181 return 0;
2182}
2183
2184static void
6aa20a22 2185bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2186{
2187 u32 val;
2188 u8 *mac_addr = bp->dev->dev_addr;
2189
2190 val = (mac_addr[0] << 8) | mac_addr[1];
2191
2192 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2193
6aa20a22 2194 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2195 (mac_addr[4] << 8) | mac_addr[5];
2196
2197 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2198}
2199
/* Allocate a fresh receive skb for ring slot @index, DMA-map it, and
 * publish its bus address in the matching rx_bd descriptor.
 *
 * Also advances bp->rx_prod_bseq by the buffer size; the caller is
 * responsible for eventually writing the new producer index/bseq to
 * the chip mailbox.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align the data pointer to a BNX2_RX_ALIGN boundary before
	 * mapping, since the descriptor address comes from skb->data.
	 */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Remember skb and its mapping so the completion path can unmap. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* 64-bit bus address split across the two descriptor words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2230
da3e4fbe
MC
2231static int
2232bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
b6016b76 2233{
da3e4fbe 2234 struct status_block *sblk = bp->status_blk;
b6016b76 2235 u32 new_link_state, old_link_state;
da3e4fbe 2236 int is_set = 1;
b6016b76 2237
da3e4fbe
MC
2238 new_link_state = sblk->status_attn_bits & event;
2239 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2240 if (new_link_state != old_link_state) {
da3e4fbe
MC
2241 if (new_link_state)
2242 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2243 else
2244 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2245 } else
2246 is_set = 0;
2247
2248 return is_set;
2249}
2250
/* Service PHY-related attention events from the status block:
 * link-state changes (handled under phy_lock) and remote-PHY timer
 * abort events.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2263
/* Reclaim completed TX descriptors.
 *
 * Walks the TX ring from the software consumer to the hardware
 * consumer index in the status block, unmapping DMA and freeing each
 * completed skb, then wakes the queue if it was stopped and enough
 * descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The hw consumer skips the last slot of a ring page. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim a TSO packet once ALL of its BDs
			 * (head + frags) have completed.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment page as we advance the consumer. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hw consumer: more packets may have
		 * completed while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-check under netif_tx_lock to avoid racing with
	 * bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2351
/* Recycle the skb at RX ring slot @cons into slot @prod without
 * re-allocating: the DMA mapping and descriptor address are moved from
 * the consumer entry to the producer entry.  Used when a packet is
 * copied or dropped and the original buffer can be handed straight
 * back to the hardware.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (partially CPU-synced) header region back to the
	 * device before the buffer is reposted.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing to move. */
	if (cons == prod)
		return;

	/* Transfer the DMA mapping and descriptor address. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2381
c09c2627
MC
2382static inline u16
2383bnx2_get_hw_rx_cons(struct bnx2 *bp)
2384{
2385 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2386
2387 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2388 cons++;
2389 return cons;
2390}
2391
b6016b76
MC
/* NAPI RX processing: consume up to @budget completed RX descriptors.
 *
 * For each packet: validate the l2_fhdr status, either copy small
 * packets into a fresh skb (jumbo MTU case), replace the buffer with a
 * newly allocated one, or recycle the buffer on error/allocation
 * failure.  Finally publish the new producer index and byte sequence
 * to the chip mailbox.
 *
 * Returns the number of packets delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bnx2_get_hw_rx_cons(bp);
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync just the header region so the l2_fhdr (and a
		 * small-packet copy) can be read on the CPU.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		/* Strip the 4-byte frame CRC from the reported length. */
		len = rx_hdr->l2_fhdr_pkt_len - 4;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes straight back to the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement buffer posted: hand the full packet
			 * buffer up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error or allocation failure: recycle and drop. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bp);
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2532
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2555
8e6a72c4
MC
/* One-shot MSI ISR: like bnx2_msi() but without the explicit mask
 * write — the hardware disarms the interrupt on its own in one-shot
 * mode, so scheduling NAPI is all that is needed.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2572
b6016b76 2573static irqreturn_t
7d12e780 2574bnx2_interrupt(int irq, void *dev_instance)
b6016b76
MC
2575{
2576 struct net_device *dev = dev_instance;
972ec0d4 2577 struct bnx2 *bp = netdev_priv(dev);
b8a7ce7b 2578 struct status_block *sblk = bp->status_blk;
b6016b76
MC
2579
2580 /* When using INTx, it is possible for the interrupt to arrive
2581 * at the CPU before the status block posted prior to the
2582 * interrupt. Reading a register will flush the status block.
2583 * When using MSI, the MSI message will always complete after
2584 * the status block write.
2585 */
b8a7ce7b 2586 if ((sblk->status_idx == bp->last_status_idx) &&
b6016b76
MC
2587 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2588 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 2589 return IRQ_NONE;
b6016b76
MC
2590
2591 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2592 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2593 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2594
b8a7ce7b
MC
2595 /* Read back to deassert IRQ immediately to avoid too many
2596 * spurious interrupts.
2597 */
2598 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2599
b6016b76 2600 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
2601 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2602 return IRQ_HANDLED;
b6016b76 2603
bea3348e 2604 if (netif_rx_schedule_prep(dev, &bp->napi)) {
b8a7ce7b 2605 bp->last_status_idx = sblk->status_idx;
bea3348e 2606 __netif_rx_schedule(dev, &bp->napi);
b8a7ce7b 2607 }
b6016b76 2608
73eef4cd 2609 return IRQ_HANDLED;
b6016b76
MC
2610}
2611
0d8a6571
MC
/* Attention events serviced by the poll loop: link state changes and
 * remote-PHY timer aborts.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return nonzero if the status block indicates pending RX, TX, or
 * attention work for the NAPI poll loop.
 */
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	/* An attention event is pending when its raised bit differs
	 * from its ack bit.
	 */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
2630
6f535763 2631static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
b6016b76 2632{
da3e4fbe
MC
2633 struct status_block *sblk = bp->status_blk;
2634 u32 status_attn_bits = sblk->status_attn_bits;
2635 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
b6016b76 2636
da3e4fbe
MC
2637 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2638 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
b6016b76 2639
b6016b76 2640 bnx2_phy_int(bp);
bf5295bb
MC
2641
2642 /* This is needed to take care of transient status
2643 * during link changes.
2644 */
2645 REG_WR(bp, BNX2_HC_COMMAND,
2646 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2647 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76
MC
2648 }
2649
6dee6421 2650 if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
b6016b76 2651 bnx2_tx_int(bp);
b6016b76 2652
c09c2627 2653 if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
6f535763 2654 work_done += bnx2_rx_int(bp, budget - work_done);
6aa20a22 2655
6f535763
DM
2656 return work_done;
2657}
2658
/* NAPI poll entry point.  Loops over bnx2_poll_work() until either the
 * budget is exhausted or no more work is pending, then completes NAPI
 * and re-enables interrupts with the last processed status index.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	int work_done = 0;
	struct status_block *sblk = bp->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bp->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bp->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bp))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & USING_MSI_FLAG)) {
				/* MSI: a single ack/unmask write suffices. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bp->last_status_idx);
				break;
			}
			/* INTx: ack with interrupts still masked first,
			 * then unmask with a second write.  The two-step
			 * sequence is deliberate.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bp->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			break;
		}
	}

	return work_done;
}
2699
932ff279 2700/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
2701 * from set_multicast.
2702 */
2703static void
2704bnx2_set_rx_mode(struct net_device *dev)
2705{
972ec0d4 2706 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
2707 u32 rx_mode, sort_mode;
2708 int i;
b6016b76 2709
c770a65c 2710 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
2711
2712 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2713 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2714 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2715#ifdef BCM_VLAN
e29054f9 2716 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
b6016b76 2717 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76 2718#else
e29054f9
MC
2719 if (!(bp->flags & ASF_ENABLE_FLAG))
2720 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
2721#endif
2722 if (dev->flags & IFF_PROMISC) {
2723 /* Promiscuous mode. */
2724 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
2725 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2726 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
2727 }
2728 else if (dev->flags & IFF_ALLMULTI) {
2729 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2730 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2731 0xffffffff);
2732 }
2733 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2734 }
2735 else {
2736 /* Accept one or more multicast(s). */
2737 struct dev_mc_list *mclist;
2738 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2739 u32 regidx;
2740 u32 bit;
2741 u32 crc;
2742
2743 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2744
2745 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2746 i++, mclist = mclist->next) {
2747
2748 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2749 bit = crc & 0xff;
2750 regidx = (bit & 0xe0) >> 5;
2751 bit &= 0x1f;
2752 mc_filter[regidx] |= (1 << bit);
2753 }
2754
2755 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2756 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2757 mc_filter[i]);
2758 }
2759
2760 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2761 }
2762
2763 if (rx_mode != bp->rx_mode) {
2764 bp->rx_mode = rx_mode;
2765 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2766 }
2767
2768 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2769 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2770 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2771
c770a65c 2772 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
2773}
2774
/* Load an RV2P processor's instruction memory.
 *
 * Instructions are 64-bit: each pair of 32-bit words is written to the
 * INSTR_HIGH/INSTR_LOW staging registers, then committed to slot i/8
 * of the selected processor (@rv2p_proc is RV2P_PROC1 or RV2P_PROC2).
 * The cpu_to_le32() on a register write is intentional here — the
 * firmware image words are stored in a fixed order.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2807
af3ee519 2808static int
b6016b76
MC
2809load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2810{
2811 u32 offset;
2812 u32 val;
af3ee519 2813 int rc;
b6016b76
MC
2814
2815 /* Halt the CPU. */
2816 val = REG_RD_IND(bp, cpu_reg->mode);
2817 val |= cpu_reg->mode_value_halt;
2818 REG_WR_IND(bp, cpu_reg->mode, val);
2819 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2820
2821 /* Load the Text area. */
2822 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519 2823 if (fw->gz_text) {
b6016b76
MC
2824 int j;
2825
ea1f8d5c
MC
2826 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
2827 fw->gz_text_len);
2828 if (rc < 0)
b3448b0b 2829 return rc;
ea1f8d5c 2830
b6016b76 2831 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
ea1f8d5c 2832 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
b6016b76
MC
2833 }
2834 }
2835
2836 /* Load the Data area. */
2837 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2838 if (fw->data) {
2839 int j;
2840
2841 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2842 REG_WR_IND(bp, offset, fw->data[j]);
2843 }
2844 }
2845
2846 /* Load the SBSS area. */
2847 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
ea1f8d5c 2848 if (fw->sbss_len) {
b6016b76
MC
2849 int j;
2850
2851 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
ea1f8d5c 2852 REG_WR_IND(bp, offset, 0);
b6016b76
MC
2853 }
2854 }
2855
2856 /* Load the BSS area. */
2857 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
ea1f8d5c 2858 if (fw->bss_len) {
b6016b76
MC
2859 int j;
2860
2861 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
ea1f8d5c 2862 REG_WR_IND(bp, offset, 0);
b6016b76
MC
2863 }
2864 }
2865
2866 /* Load the Read-Only area. */
2867 offset = cpu_reg->spad_base +
2868 (fw->rodata_addr - cpu_reg->mips_view_base);
2869 if (fw->rodata) {
2870 int j;
2871
2872 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2873 REG_WR_IND(bp, offset, fw->rodata[j]);
2874 }
2875 }
2876
2877 /* Clear the pre-fetch instruction. */
2878 REG_WR_IND(bp, cpu_reg->inst, 0);
2879 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2880
2881 /* Start the CPU. */
2882 val = REG_RD_IND(bp, cpu_reg->mode);
2883 val &= ~cpu_reg->mode_value_halt;
2884 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2885 REG_WR_IND(bp, cpu_reg->mode, val);
af3ee519
MC
2886
2887 return 0;
b6016b76
MC
2888}
2889
fba9fe91 2890static int
b6016b76
MC
2891bnx2_init_cpus(struct bnx2 *bp)
2892{
2893 struct cpu_reg cpu_reg;
af3ee519 2894 struct fw_info *fw;
b3448b0b 2895 int rc;
fba9fe91 2896 void *text;
b6016b76
MC
2897
2898 /* Initialize the RV2P processor. */
b3448b0b
DV
2899 text = vmalloc(FW_BUF_SIZE);
2900 if (!text)
2901 return -ENOMEM;
8336793b 2902 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1));
ea1f8d5c 2903 if (rc < 0)
fba9fe91 2904 goto init_cpu_err;
ea1f8d5c 2905
b3448b0b 2906 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
fba9fe91 2907
8336793b 2908 rc = zlib_inflate_blob(text, FW_BUF_SIZE, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2));
ea1f8d5c 2909 if (rc < 0)
fba9fe91 2910 goto init_cpu_err;
ea1f8d5c 2911
b3448b0b 2912 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
b6016b76
MC
2913
2914 /* Initialize the RX Processor. */
2915 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2916 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2917 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2918 cpu_reg.state = BNX2_RXP_CPU_STATE;
2919 cpu_reg.state_value_clear = 0xffffff;
2920 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2921 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2922 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2923 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2924 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2925 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2926 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 2927
d43584c8
MC
2928 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2929 fw = &bnx2_rxp_fw_09;
2930 else
2931 fw = &bnx2_rxp_fw_06;
fba9fe91 2932
ea1f8d5c 2933 fw->text = text;
af3ee519 2934 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
2935 if (rc)
2936 goto init_cpu_err;
2937
b6016b76
MC
2938 /* Initialize the TX Processor. */
2939 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2940 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2941 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2942 cpu_reg.state = BNX2_TXP_CPU_STATE;
2943 cpu_reg.state_value_clear = 0xffffff;
2944 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2945 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2946 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2947 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2948 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2949 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2950 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 2951
d43584c8
MC
2952 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2953 fw = &bnx2_txp_fw_09;
2954 else
2955 fw = &bnx2_txp_fw_06;
fba9fe91 2956
ea1f8d5c 2957 fw->text = text;
af3ee519 2958 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
2959 if (rc)
2960 goto init_cpu_err;
2961
b6016b76
MC
2962 /* Initialize the TX Patch-up Processor. */
2963 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2964 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2965 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2966 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2967 cpu_reg.state_value_clear = 0xffffff;
2968 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2969 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2970 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2971 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2972 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2973 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2974 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 2975
d43584c8
MC
2976 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2977 fw = &bnx2_tpat_fw_09;
2978 else
2979 fw = &bnx2_tpat_fw_06;
fba9fe91 2980
ea1f8d5c 2981 fw->text = text;
af3ee519 2982 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
2983 if (rc)
2984 goto init_cpu_err;
2985
b6016b76
MC
2986 /* Initialize the Completion Processor. */
2987 cpu_reg.mode = BNX2_COM_CPU_MODE;
2988 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2989 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2990 cpu_reg.state = BNX2_COM_CPU_STATE;
2991 cpu_reg.state_value_clear = 0xffffff;
2992 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2993 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2994 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2995 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2996 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2997 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2998 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 2999
d43584c8
MC
3000 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3001 fw = &bnx2_com_fw_09;
3002 else
3003 fw = &bnx2_com_fw_06;
fba9fe91 3004
ea1f8d5c 3005 fw->text = text;
af3ee519 3006 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3007 if (rc)
3008 goto init_cpu_err;
3009
d43584c8
MC
3010 /* Initialize the Command Processor. */
3011 cpu_reg.mode = BNX2_CP_CPU_MODE;
3012 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3013 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3014 cpu_reg.state = BNX2_CP_CPU_STATE;
3015 cpu_reg.state_value_clear = 0xffffff;
3016 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3017 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3018 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3019 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3020 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3021 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3022 cpu_reg.mips_view_base = 0x8000000;
b6016b76 3023
d43584c8
MC
3024 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3025 fw = &bnx2_cp_fw_09;
b6016b76 3026
ea1f8d5c 3027 fw->text = text;
6c1bbcc8 3028 rc = load_cpu_fw(bp, &cpu_reg, fw);
d43584c8
MC
3029 if (rc)
3030 goto init_cpu_err;
3031 }
fba9fe91 3032init_cpu_err:
ea1f8d5c 3033 vfree(text);
fba9fe91 3034 return rc;
b6016b76
MC
3035}
3036
/* Transition the device between PCI power states.
 *
 * PCI_D0: clear PME status, wait out the D3hot exit delay, and restore
 * normal EMAC/RPM configuration.
 * PCI_D3hot: when WOL is enabled, reprogram the PHY for low-speed
 * autoneg (TP only), set up magic-packet/ACPI receive and broadcast/
 * multicast sorting, notify firmware, then write the D3hot state (with
 * PME enable when waking is armed).
 *
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper for
			 * the low-power link, restoring the user settings
			 * afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot when WOL is armed. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3173
3174static int
3175bnx2_acquire_nvram_lock(struct bnx2 *bp)
3176{
3177 u32 val;
3178 int j;
3179
3180 /* Request access to the flash interface. */
3181 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3182 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3183 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3184 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3185 break;
3186
3187 udelay(5);
3188 }
3189
3190 if (j >= NVRAM_TIMEOUT_COUNT)
3191 return -EBUSY;
3192
3193 return 0;
3194}
3195
3196static int
3197bnx2_release_nvram_lock(struct bnx2 *bp)
3198{
3199 int j;
3200 u32 val;
3201
3202 /* Relinquish nvram interface. */
3203 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3204
3205 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3206 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3207 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3208 break;
3209
3210 udelay(5);
3211 }
3212
3213 if (j >= NVRAM_TIMEOUT_COUNT)
3214 return -EBUSY;
3215
3216 return 0;
3217}
3218
3219
3220static int
3221bnx2_enable_nvram_write(struct bnx2 *bp)
3222{
3223 u32 val;
3224
3225 val = REG_RD(bp, BNX2_MISC_CFG);
3226 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3227
e30372c9 3228 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
3229 int j;
3230
3231 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3232 REG_WR(bp, BNX2_NVM_COMMAND,
3233 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3234
3235 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3236 udelay(5);
3237
3238 val = REG_RD(bp, BNX2_NVM_COMMAND);
3239 if (val & BNX2_NVM_COMMAND_DONE)
3240 break;
3241 }
3242
3243 if (j >= NVRAM_TIMEOUT_COUNT)
3244 return -EBUSY;
3245 }
3246 return 0;
3247}
3248
3249static void
3250bnx2_disable_nvram_write(struct bnx2 *bp)
3251{
3252 u32 val;
3253
3254 val = REG_RD(bp, BNX2_MISC_CFG);
3255 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3256}
3257
3258
3259static void
3260bnx2_enable_nvram_access(struct bnx2 *bp)
3261{
3262 u32 val;
3263
3264 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3265 /* Enable both bits, even on read. */
6aa20a22 3266 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3267 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3268}
3269
3270static void
3271bnx2_disable_nvram_access(struct bnx2 *bp)
3272{
3273 u32 val;
3274
3275 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3276 /* Disable both bits, even after read. */
6aa20a22 3277 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3278 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3279 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3280}
3281
/* Erase the flash page containing @offset.  A no-op for buffered
 * (EEPROM-style) parts, which need no explicit erase before a write.
 * Caller must already hold the NVRAM lock with access enabled.
 * Returns 0 on success, -EBUSY if the erase does not complete in time.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3321
/* Read one 32-bit word from NVRAM at @offset into @ret_val (4 bytes).
 * The controller's read register holds big-endian data, hence the
 * be32_to_cpu before copying out.  @cmd_flags carries the
 * BNX2_NVM_COMMAND_FIRST/LAST framing bits for multi-word transfers.
 * Caller must already hold the NVRAM lock with access enabled.
 * Returns 0 on success, -EBUSY on poll timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Map a linear offset to the device's page/bit layout. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3367
3368
/* Write one 32-bit word (@val, 4 host-order bytes) to NVRAM at
 * @offset.  The word is converted to big-endian for the controller's
 * write register.  @cmd_flags carries the FIRST/LAST framing bits.
 * Caller must hold the NVRAM lock with write access enabled.
 * Returns 0 on success, -EBUSY on poll timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Map a linear offset to the device's page/bit layout. */
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3412
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, reprogramming the flash interface registers if the
 * straps have not yet been applied.  The 5709 uses a single fixed
 * flash spec and skips strap matching entirely.  Also determines the
 * usable flash size from shared firmware config (falling back to the
 * part's total size).  Returns 0 on success, -ENODEV if the part is
 * unrecognized, or an error from the NVRAM lock acquisition.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap set. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Firmware may report a usable size smaller than the part. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3495
3496static int
3497bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3498 int buf_size)
3499{
3500 int rc = 0;
3501 u32 cmd_flags, offset32, len32, extra;
3502
3503 if (buf_size == 0)
3504 return 0;
3505
3506 /* Request access to the flash interface. */
3507 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3508 return rc;
3509
3510 /* Enable access to flash interface */
3511 bnx2_enable_nvram_access(bp);
3512
3513 len32 = buf_size;
3514 offset32 = offset;
3515 extra = 0;
3516
3517 cmd_flags = 0;
3518
3519 if (offset32 & 3) {
3520 u8 buf[4];
3521 u32 pre_len;
3522
3523 offset32 &= ~3;
3524 pre_len = 4 - (offset & 3);
3525
3526 if (pre_len >= len32) {
3527 pre_len = len32;
3528 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3529 BNX2_NVM_COMMAND_LAST;
3530 }
3531 else {
3532 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3533 }
3534
3535 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3536
3537 if (rc)
3538 return rc;
3539
3540 memcpy(ret_buf, buf + (offset & 3), pre_len);
3541
3542 offset32 += 4;
3543 ret_buf += pre_len;
3544 len32 -= pre_len;
3545 }
3546 if (len32 & 3) {
3547 extra = 4 - (len32 & 3);
3548 len32 = (len32 + 4) & ~3;
3549 }
3550
3551 if (len32 == 4) {
3552 u8 buf[4];
3553
3554 if (cmd_flags)
3555 cmd_flags = BNX2_NVM_COMMAND_LAST;
3556 else
3557 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3558 BNX2_NVM_COMMAND_LAST;
3559
3560 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3561
3562 memcpy(ret_buf, buf, 4 - extra);
3563 }
3564 else if (len32 > 0) {
3565 u8 buf[4];
3566
3567 /* Read the first word. */
3568 if (cmd_flags)
3569 cmd_flags = 0;
3570 else
3571 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3572
3573 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3574
3575 /* Advance to the next dword. */
3576 offset32 += 4;
3577 ret_buf += 4;
3578 len32 -= 4;
3579
3580 while (len32 > 4 && rc == 0) {
3581 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3582
3583 /* Advance to the next dword. */
3584 offset32 += 4;
3585 ret_buf += 4;
3586 len32 -= 4;
3587 }
3588
3589 if (rc)
3590 return rc;
3591
3592 cmd_flags = BNX2_NVM_COMMAND_LAST;
3593 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3594
3595 memcpy(ret_buf, buf, 4 - extra);
3596 }
3597
3598 /* Disable access to flash interface */
3599 bnx2_disable_nvram_access(bp);
3600
3601 bnx2_release_nvram_lock(bp);
3602
3603 return rc;
3604}
3605
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 * Unaligned head/tail bytes are handled read-modify-write via a
 * temporary aligned buffer.  For unbuffered flash each affected page
 * is read into @flash_buffer, erased, and rewritten with the merged
 * contents; buffered parts are written directly.  The NVRAM lock is
 * taken and dropped around each page so other agents can interleave.
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): the error paths that `goto nvram_write_end` after
 * bnx2_acquire_nvram_lock() has succeeded return with the NVRAM lock
 * still held and access enabled — same leak pattern as elsewhere in
 * this file; verify before relying on recovery after a write error.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: fetch the existing leading dword so
		 * the bytes before @offset are preserved. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: fetch the existing trailing dword. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build a fully aligned image: saved head + caller data
		 * + saved tail. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer for one full flash page (264 bytes
		 * covers the largest supported page). */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3785
0d8a6571
MC
/* Probe firmware for remote-PHY capability on SerDes devices and
 * record it in bp->phy_flags / bp->phy_port.  If the interface is
 * already running, also sync link state from the firmware-reported
 * status and acknowledge the capability back to firmware.
 * Called with bp->phy_lock held (see bnx2_reset_chip).
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	/* Remote PHY only applies to SerDes-attached devices. */
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* Firmware advertises capabilities behind a signature word. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (netif_running(bp->dev)) {
			u32 sig;

			/* Mirror the firmware's link state into the
			 * net device's carrier state. */
			if (val & BNX2_LINK_STATUS_LINK_UP) {
				bp->link_up = 1;
				netif_carrier_on(bp->dev);
			} else {
				bp->link_up = 0;
				netif_carrier_off(bp->dev);
			}
			/* Tell firmware we will use the remote PHY. */
			sig = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   sig);
		}
	}
}
3825
b6016b76
MC
/* Perform a coordinated soft reset of the chip.  Quiesces DMA, hand-
 * shakes with firmware before and after, issues the chip-specific
 * reset (MISC_COMMAND on 5709, PCICFG core-reset otherwise), then
 * re-probes remote-PHY state and applies 5706 A0 workarounds.
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason passed to firmware.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via MISC_COMMAND; read back to flush the
		 * posted write. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	/* Re-apply default link settings if the remote-PHY port type
	 * changed across the reset. */
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3927
/* Program the chip's global configuration after a reset: DMA/swap
 * setup, context memory, on-chip CPUs' firmware, NVRAM detection, MAC
 * address, MTU, host-coalescing parameters, and the RX filter; then
 * handshakes with firmware and enables the remaining engine blocks.
 * Returns 0 on success or a negative errno from a sub-step.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA tuning bit for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 erratum workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the interrupt-mode value
	 * in the high 16 bits and the polling value in the low 16. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining engine blocks; read back to flush. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4105
59b47d8a
MC
4106static void
4107bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4108{
4109 u32 val, offset0, offset1, offset2, offset3;
4110
4111 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4112 offset0 = BNX2_L2CTX_TYPE_XI;
4113 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4114 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4115 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4116 } else {
4117 offset0 = BNX2_L2CTX_TYPE;
4118 offset1 = BNX2_L2CTX_CMD_TYPE;
4119 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4120 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4121 }
4122 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4123 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4124
4125 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4126 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4127
4128 val = (u64) bp->tx_desc_mapping >> 32;
4129 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4130
4131 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4132 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4133}
b6016b76
MC
4134
4135static void
4136bnx2_init_tx_ring(struct bnx2 *bp)
4137{
4138 struct tx_bd *txbd;
59b47d8a 4139 u32 cid;
b6016b76 4140
2f8af120
MC
4141 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4142
b6016b76 4143 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4144
b6016b76
MC
4145 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4146 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4147
4148 bp->tx_prod = 0;
4149 bp->tx_cons = 0;
f4e418f7 4150 bp->hw_tx_cons = 0;
b6016b76 4151 bp->tx_prod_bseq = 0;
6aa20a22 4152
59b47d8a
MC
4153 cid = TX_CID;
4154 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4155 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4156
59b47d8a 4157 bnx2_init_tx_context(bp, cid);
b6016b76
MC
4158}
4159
/* Initialize the RX descriptor ring(s): compute buffer sizes from the
 * current MTU, chain the ring pages into a circular list, program the
 * RX context with the ring's DMA base, then pre-fill the ring with
 * receive skbs and publish the initial producer index to hardware.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last BD of each page chains to the next page; the
		 * final page chains back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring; stop early if skb allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Hand the filled descriptors to the hardware. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4218
13daffa2
MC
4219static void
4220bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4221{
4222 u32 num_rings, max;
4223
4224 bp->rx_ring_size = size;
4225 num_rings = 1;
4226 while (size > MAX_RX_DESC_CNT) {
4227 size -= MAX_RX_DESC_CNT;
4228 num_rings++;
4229 }
4230 /* round to next power of 2 */
4231 max = MAX_RX_RINGS;
4232 while ((max & num_rings) == 0)
4233 max >>= 1;
4234
4235 if (num_rings != max)
4236 max <<= 1;
4237
4238 bp->rx_max_ring = max;
4239 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4240}
4241
b6016b76
MC
/* Free every skb still owned by the TX ring, unmapping the head and
 * all fragment DMA mappings.  A multi-fragment skb occupies the slot
 * at its head index plus one slot per fragment, hence the i += j + 1
 * advance past the whole group.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from its own ring slot. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}

}
4278
4279static void
4280bnx2_free_rx_skbs(struct bnx2 *bp)
4281{
4282 int i;
4283
4284 if (bp->rx_buf_ring == NULL)
4285 return;
4286
13daffa2 4287 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4288 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4289 struct sk_buff *skb = rx_buf->skb;
4290
05d0f1cf 4291 if (skb == NULL)
b6016b76
MC
4292 continue;
4293
4294 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4295 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4296
4297 rx_buf->skb = NULL;
4298
745720e5 4299 dev_kfree_skb(skb);
b6016b76
MC
4300 }
4301}
4302
/* Release all driver-owned skbs on both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4309
4310static int
4311bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4312{
4313 int rc;
4314
4315 rc = bnx2_reset_chip(bp, reset_code);
4316 bnx2_free_skbs(bp);
4317 if (rc)
4318 return rc;
4319
fba9fe91
MC
4320 if ((rc = bnx2_init_chip(bp)) != 0)
4321 return rc;
4322
b6016b76
MC
4323 bnx2_init_tx_ring(bp);
4324 bnx2_init_rx_ring(bp);
4325 return 0;
4326}
4327
4328static int
4329bnx2_init_nic(struct bnx2 *bp)
4330{
4331 int rc;
4332
4333 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4334 return rc;
4335
80be4434 4336 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4337 bnx2_init_phy(bp);
4338 bnx2_set_link(bp);
0d8a6571 4339 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4340 return 0;
4341}
4342
4343static int
4344bnx2_test_registers(struct bnx2 *bp)
4345{
4346 int ret;
5bae30c9 4347 int i, is_5709;
f71e1309 4348 static const struct {
b6016b76
MC
4349 u16 offset;
4350 u16 flags;
5bae30c9 4351#define BNX2_FL_NOT_5709 1
b6016b76
MC
4352 u32 rw_mask;
4353 u32 ro_mask;
4354 } reg_tbl[] = {
4355 { 0x006c, 0, 0x00000000, 0x0000003f },
4356 { 0x0090, 0, 0xffffffff, 0x00000000 },
4357 { 0x0094, 0, 0x00000000, 0x00000000 },
4358
5bae30c9
MC
4359 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4360 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4361 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4362 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4363 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4364 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4365 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4366 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4367 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4368
4369 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4370 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4371 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4372 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4373 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4374 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4375
4376 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4377 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4378 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
4379
4380 { 0x1000, 0, 0x00000000, 0x00000001 },
4381 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
4382
4383 { 0x1408, 0, 0x01c00800, 0x00000000 },
4384 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4385 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 4386 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
4387 { 0x14b0, 0, 0x00000002, 0x00000001 },
4388 { 0x14b8, 0, 0x00000000, 0x00000000 },
4389 { 0x14c0, 0, 0x00000000, 0x00000009 },
4390 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4391 { 0x14cc, 0, 0x00000000, 0x00000001 },
4392 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
4393
4394 { 0x1800, 0, 0x00000000, 0x00000001 },
4395 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
4396
4397 { 0x2800, 0, 0x00000000, 0x00000001 },
4398 { 0x2804, 0, 0x00000000, 0x00003f01 },
4399 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4400 { 0x2810, 0, 0xffff0000, 0x00000000 },
4401 { 0x2814, 0, 0xffff0000, 0x00000000 },
4402 { 0x2818, 0, 0xffff0000, 0x00000000 },
4403 { 0x281c, 0, 0xffff0000, 0x00000000 },
4404 { 0x2834, 0, 0xffffffff, 0x00000000 },
4405 { 0x2840, 0, 0x00000000, 0xffffffff },
4406 { 0x2844, 0, 0x00000000, 0xffffffff },
4407 { 0x2848, 0, 0xffffffff, 0x00000000 },
4408 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4409
4410 { 0x2c00, 0, 0x00000000, 0x00000011 },
4411 { 0x2c04, 0, 0x00000000, 0x00030007 },
4412
b6016b76
MC
4413 { 0x3c00, 0, 0x00000000, 0x00000001 },
4414 { 0x3c04, 0, 0x00000000, 0x00070000 },
4415 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4416 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4417 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4418 { 0x3c14, 0, 0x00000000, 0xffffffff },
4419 { 0x3c18, 0, 0x00000000, 0xffffffff },
4420 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4421 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
4422
4423 { 0x5004, 0, 0x00000000, 0x0000007f },
4424 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 4425
b6016b76
MC
4426 { 0x5c00, 0, 0x00000000, 0x00000001 },
4427 { 0x5c04, 0, 0x00000000, 0x0003000f },
4428 { 0x5c08, 0, 0x00000003, 0x00000000 },
4429 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4430 { 0x5c10, 0, 0x00000000, 0xffffffff },
4431 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4432 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4433 { 0x5c88, 0, 0x00000000, 0x00077373 },
4434 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4435
4436 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4437 { 0x680c, 0, 0xffffffff, 0x00000000 },
4438 { 0x6810, 0, 0xffffffff, 0x00000000 },
4439 { 0x6814, 0, 0xffffffff, 0x00000000 },
4440 { 0x6818, 0, 0xffffffff, 0x00000000 },
4441 { 0x681c, 0, 0xffffffff, 0x00000000 },
4442 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4443 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4444 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4445 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4446 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4447 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4448 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4449 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4450 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4451 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4452 { 0x684c, 0, 0xffffffff, 0x00000000 },
4453 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4454 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4455 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4456 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4457 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4458 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4459
4460 { 0xffff, 0, 0x00000000, 0x00000000 },
4461 };
4462
4463 ret = 0;
5bae30c9
MC
4464 is_5709 = 0;
4465 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4466 is_5709 = 1;
4467
b6016b76
MC
4468 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4469 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
4470 u16 flags = reg_tbl[i].flags;
4471
4472 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4473 continue;
b6016b76
MC
4474
4475 offset = (u32) reg_tbl[i].offset;
4476 rw_mask = reg_tbl[i].rw_mask;
4477 ro_mask = reg_tbl[i].ro_mask;
4478
14ab9b86 4479 save_val = readl(bp->regview + offset);
b6016b76 4480
14ab9b86 4481 writel(0, bp->regview + offset);
b6016b76 4482
14ab9b86 4483 val = readl(bp->regview + offset);
b6016b76
MC
4484 if ((val & rw_mask) != 0) {
4485 goto reg_test_err;
4486 }
4487
4488 if ((val & ro_mask) != (save_val & ro_mask)) {
4489 goto reg_test_err;
4490 }
4491
14ab9b86 4492 writel(0xffffffff, bp->regview + offset);
b6016b76 4493
14ab9b86 4494 val = readl(bp->regview + offset);
b6016b76
MC
4495 if ((val & rw_mask) != rw_mask) {
4496 goto reg_test_err;
4497 }
4498
4499 if ((val & ro_mask) != (save_val & ro_mask)) {
4500 goto reg_test_err;
4501 }
4502
14ab9b86 4503 writel(save_val, bp->regview + offset);
b6016b76
MC
4504 continue;
4505
4506reg_test_err:
14ab9b86 4507 writel(save_val, bp->regview + offset);
b6016b76
MC
4508 ret = -ENODEV;
4509 break;
4510 }
4511 return ret;
4512}
4513
4514static int
4515bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4516{
f71e1309 4517 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4518 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4519 int i;
4520
4521 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4522 u32 offset;
4523
4524 for (offset = 0; offset < size; offset += 4) {
4525
4526 REG_WR_IND(bp, start + offset, test_pattern[i]);
4527
4528 if (REG_RD_IND(bp, start + offset) !=
4529 test_pattern[i]) {
4530 return -ENODEV;
4531 }
4532 }
4533 }
4534 return 0;
4535}
4536
4537static int
4538bnx2_test_memory(struct bnx2 *bp)
4539{
4540 int ret = 0;
4541 int i;
5bae30c9 4542 static struct mem_entry {
b6016b76
MC
4543 u32 offset;
4544 u32 len;
5bae30c9 4545 } mem_tbl_5706[] = {
b6016b76 4546 { 0x60000, 0x4000 },
5b0c76ad 4547 { 0xa0000, 0x3000 },
b6016b76
MC
4548 { 0xe0000, 0x4000 },
4549 { 0x120000, 0x4000 },
4550 { 0x1a0000, 0x4000 },
4551 { 0x160000, 0x4000 },
4552 { 0xffffffff, 0 },
5bae30c9
MC
4553 },
4554 mem_tbl_5709[] = {
4555 { 0x60000, 0x4000 },
4556 { 0xa0000, 0x3000 },
4557 { 0xe0000, 0x4000 },
4558 { 0x120000, 0x4000 },
4559 { 0x1a0000, 0x4000 },
4560 { 0xffffffff, 0 },
b6016b76 4561 };
5bae30c9
MC
4562 struct mem_entry *mem_tbl;
4563
4564 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4565 mem_tbl = mem_tbl_5709;
4566 else
4567 mem_tbl = mem_tbl_5706;
b6016b76
MC
4568
4569 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4570 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4571 mem_tbl[i].len)) != 0) {
4572 return ret;
4573 }
4574 }
6aa20a22 4575
b6016b76
MC
4576 return ret;
4577}
4578
bc5a0690
MC
4579#define BNX2_MAC_LOOPBACK 0
4580#define BNX2_PHY_LOOPBACK 1
4581
b6016b76 4582static int
bc5a0690 4583bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
4584{
4585 unsigned int pkt_size, num_pkts, i;
4586 struct sk_buff *skb, *rx_skb;
4587 unsigned char *packet;
bc5a0690 4588 u16 rx_start_idx, rx_idx;
b6016b76
MC
4589 dma_addr_t map;
4590 struct tx_bd *txbd;
4591 struct sw_bd *rx_buf;
4592 struct l2_fhdr *rx_hdr;
4593 int ret = -ENODEV;
4594
bc5a0690
MC
4595 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4596 bp->loopback = MAC_LOOPBACK;
4597 bnx2_set_mac_loopback(bp);
4598 }
4599 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
489310a4
MC
4600 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4601 return 0;
4602
80be4434 4603 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
4604 bnx2_set_phy_loopback(bp);
4605 }
4606 else
4607 return -EINVAL;
b6016b76
MC
4608
4609 pkt_size = 1514;
932f3772 4610 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
4611 if (!skb)
4612 return -ENOMEM;
b6016b76 4613 packet = skb_put(skb, pkt_size);
6634292b 4614 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
4615 memset(packet + 6, 0x0, 8);
4616 for (i = 14; i < pkt_size; i++)
4617 packet[i] = (unsigned char) (i & 0xff);
4618
4619 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4620 PCI_DMA_TODEVICE);
4621
bf5295bb
MC
4622 REG_WR(bp, BNX2_HC_COMMAND,
4623 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4624
b6016b76
MC
4625 REG_RD(bp, BNX2_HC_COMMAND);
4626
4627 udelay(5);
4628 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4629
b6016b76
MC
4630 num_pkts = 0;
4631
bc5a0690 4632 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
4633
4634 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4635 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4636 txbd->tx_bd_mss_nbytes = pkt_size;
4637 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4638
4639 num_pkts++;
bc5a0690
MC
4640 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4641 bp->tx_prod_bseq += pkt_size;
b6016b76 4642
234754d5
MC
4643 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4644 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4645
4646 udelay(100);
4647
bf5295bb
MC
4648 REG_WR(bp, BNX2_HC_COMMAND,
4649 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4650
b6016b76
MC
4651 REG_RD(bp, BNX2_HC_COMMAND);
4652
4653 udelay(5);
4654
4655 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 4656 dev_kfree_skb(skb);
b6016b76 4657
bc5a0690 4658 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
b6016b76
MC
4659 goto loopback_test_done;
4660 }
4661
4662 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4663 if (rx_idx != rx_start_idx + num_pkts) {
4664 goto loopback_test_done;
4665 }
4666
4667 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4668 rx_skb = rx_buf->skb;
4669
4670 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4671 skb_reserve(rx_skb, bp->rx_offset);
4672
4673 pci_dma_sync_single_for_cpu(bp->pdev,
4674 pci_unmap_addr(rx_buf, mapping),
4675 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4676
ade2bfe7 4677 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
4678 (L2_FHDR_ERRORS_BAD_CRC |
4679 L2_FHDR_ERRORS_PHY_DECODE |
4680 L2_FHDR_ERRORS_ALIGNMENT |
4681 L2_FHDR_ERRORS_TOO_SHORT |
4682 L2_FHDR_ERRORS_GIANT_FRAME)) {
4683
4684 goto loopback_test_done;
4685 }
4686
4687 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4688 goto loopback_test_done;
4689 }
4690
4691 for (i = 14; i < pkt_size; i++) {
4692 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4693 goto loopback_test_done;
4694 }
4695 }
4696
4697 ret = 0;
4698
4699loopback_test_done:
4700 bp->loopback = 0;
4701 return ret;
4702}
4703
bc5a0690
MC
4704#define BNX2_MAC_LOOPBACK_FAILED 1
4705#define BNX2_PHY_LOOPBACK_FAILED 2
4706#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4707 BNX2_PHY_LOOPBACK_FAILED)
4708
4709static int
4710bnx2_test_loopback(struct bnx2 *bp)
4711{
4712 int rc = 0;
4713
4714 if (!netif_running(bp->dev))
4715 return BNX2_LOOPBACK_FAILED;
4716
4717 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4718 spin_lock_bh(&bp->phy_lock);
4719 bnx2_init_phy(bp);
4720 spin_unlock_bh(&bp->phy_lock);
4721 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4722 rc |= BNX2_MAC_LOOPBACK_FAILED;
4723 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4724 rc |= BNX2_PHY_LOOPBACK_FAILED;
4725 return rc;
4726}
4727
b6016b76
MC
4728#define NVRAM_SIZE 0x200
4729#define CRC32_RESIDUAL 0xdebb20e3
4730
4731static int
4732bnx2_test_nvram(struct bnx2 *bp)
4733{
4734 u32 buf[NVRAM_SIZE / 4];
4735 u8 *data = (u8 *) buf;
4736 int rc = 0;
4737 u32 magic, csum;
4738
4739 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4740 goto test_nvram_done;
4741
4742 magic = be32_to_cpu(buf[0]);
4743 if (magic != 0x669955aa) {
4744 rc = -ENODEV;
4745 goto test_nvram_done;
4746 }
4747
4748 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4749 goto test_nvram_done;
4750
4751 csum = ether_crc_le(0x100, data);
4752 if (csum != CRC32_RESIDUAL) {
4753 rc = -ENODEV;
4754 goto test_nvram_done;
4755 }
4756
4757 csum = ether_crc_le(0x100, data + 0x100);
4758 if (csum != CRC32_RESIDUAL) {
4759 rc = -ENODEV;
4760 }
4761
4762test_nvram_done:
4763 return rc;
4764}
4765
4766static int
4767bnx2_test_link(struct bnx2 *bp)
4768{
4769 u32 bmsr;
4770
489310a4
MC
4771 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
4772 if (bp->link_up)
4773 return 0;
4774 return -ENODEV;
4775 }
c770a65c 4776 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
4777 bnx2_enable_bmsr1(bp);
4778 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4779 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4780 bnx2_disable_bmsr1(bp);
c770a65c 4781 spin_unlock_bh(&bp->phy_lock);
6aa20a22 4782
b6016b76
MC
4783 if (bmsr & BMSR_LSTATUS) {
4784 return 0;
4785 }
4786 return -ENODEV;
4787}
4788
4789static int
4790bnx2_test_intr(struct bnx2 *bp)
4791{
4792 int i;
b6016b76
MC
4793 u16 status_idx;
4794
4795 if (!netif_running(bp->dev))
4796 return -ENODEV;
4797
4798 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4799
4800 /* This register is not touched during run-time. */
bf5295bb 4801 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
4802 REG_RD(bp, BNX2_HC_COMMAND);
4803
4804 for (i = 0; i < 10; i++) {
4805 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4806 status_idx) {
4807
4808 break;
4809 }
4810
4811 msleep_interruptible(10);
4812 }
4813 if (i < 10)
4814 return 0;
4815
4816 return -ENODEV;
4817}
4818
4819static void
48b01e2d 4820bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 4821{
48b01e2d
MC
4822 spin_lock(&bp->phy_lock);
4823 if (bp->serdes_an_pending)
4824 bp->serdes_an_pending--;
4825 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4826 u32 bmcr;
b6016b76 4827
48b01e2d 4828 bp->current_interval = bp->timer_interval;
cd339a0e 4829
ca58c3af 4830 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 4831
48b01e2d
MC
4832 if (bmcr & BMCR_ANENABLE) {
4833 u32 phy1, phy2;
b6016b76 4834
48b01e2d
MC
4835 bnx2_write_phy(bp, 0x1c, 0x7c00);
4836 bnx2_read_phy(bp, 0x1c, &phy1);
cea94db9 4837
48b01e2d
MC
4838 bnx2_write_phy(bp, 0x17, 0x0f01);
4839 bnx2_read_phy(bp, 0x15, &phy2);
4840 bnx2_write_phy(bp, 0x17, 0x0f01);
4841 bnx2_read_phy(bp, 0x15, &phy2);
b6016b76 4842
48b01e2d
MC
4843 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4844 !(phy2 & 0x20)) { /* no CONFIG */
4845
4846 bmcr &= ~BMCR_ANENABLE;
4847 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 4848 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
48b01e2d
MC
4849 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4850 }
b6016b76 4851 }
48b01e2d
MC
4852 }
4853 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4854 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4855 u32 phy2;
b6016b76 4856
48b01e2d
MC
4857 bnx2_write_phy(bp, 0x17, 0x0f01);
4858 bnx2_read_phy(bp, 0x15, &phy2);
4859 if (phy2 & 0x20) {
4860 u32 bmcr;
cd339a0e 4861
ca58c3af 4862 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 4863 bmcr |= BMCR_ANENABLE;
ca58c3af 4864 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 4865
48b01e2d
MC
4866 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4867 }
4868 } else
4869 bp->current_interval = bp->timer_interval;
b6016b76 4870
48b01e2d
MC
4871 spin_unlock(&bp->phy_lock);
4872}
b6016b76 4873
f8dd064e
MC
4874static void
4875bnx2_5708_serdes_timer(struct bnx2 *bp)
4876{
0d8a6571
MC
4877 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4878 return;
4879
f8dd064e
MC
4880 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4881 bp->serdes_an_pending = 0;
4882 return;
4883 }
b6016b76 4884
f8dd064e
MC
4885 spin_lock(&bp->phy_lock);
4886 if (bp->serdes_an_pending)
4887 bp->serdes_an_pending--;
4888 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4889 u32 bmcr;
b6016b76 4890
ca58c3af 4891 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 4892 if (bmcr & BMCR_ANENABLE) {
605a9e20 4893 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
4894 bp->current_interval = SERDES_FORCED_TIMEOUT;
4895 } else {
605a9e20 4896 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
4897 bp->serdes_an_pending = 2;
4898 bp->current_interval = bp->timer_interval;
b6016b76 4899 }
b6016b76 4900
f8dd064e
MC
4901 } else
4902 bp->current_interval = bp->timer_interval;
b6016b76 4903
f8dd064e
MC
4904 spin_unlock(&bp->phy_lock);
4905}
4906
48b01e2d
MC
4907static void
4908bnx2_timer(unsigned long data)
4909{
4910 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 4911
48b01e2d
MC
4912 if (!netif_running(bp->dev))
4913 return;
b6016b76 4914
48b01e2d
MC
4915 if (atomic_read(&bp->intr_sem) != 0)
4916 goto bnx2_restart_timer;
b6016b76 4917
df149d70 4918 bnx2_send_heart_beat(bp);
b6016b76 4919
48b01e2d 4920 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 4921
02537b06
MC
4922 /* workaround occasional corrupted counters */
4923 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4924 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4925 BNX2_HC_COMMAND_STATS_NOW);
4926
f8dd064e
MC
4927 if (bp->phy_flags & PHY_SERDES_FLAG) {
4928 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4929 bnx2_5706_serdes_timer(bp);
27a005b8 4930 else
f8dd064e 4931 bnx2_5708_serdes_timer(bp);
b6016b76
MC
4932 }
4933
4934bnx2_restart_timer:
cd339a0e 4935 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
4936}
4937
8e6a72c4
MC
4938static int
4939bnx2_request_irq(struct bnx2 *bp)
4940{
4941 struct net_device *dev = bp->dev;
4942 int rc = 0;
4943
4944 if (bp->flags & USING_MSI_FLAG) {
4945 irq_handler_t fn = bnx2_msi;
4946
4947 if (bp->flags & ONE_SHOT_MSI_FLAG)
4948 fn = bnx2_msi_1shot;
4949
4950 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4951 } else
4952 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4953 IRQF_SHARED, dev->name, dev);
4954 return rc;
4955}
4956
4957static void
4958bnx2_free_irq(struct bnx2 *bp)
4959{
4960 struct net_device *dev = bp->dev;
4961
4962 if (bp->flags & USING_MSI_FLAG) {
4963 free_irq(bp->pdev->irq, dev);
4964 pci_disable_msi(bp->pdev);
4965 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4966 } else
4967 free_irq(bp->pdev->irq, dev);
4968}
4969
b6016b76
MC
4970/* Called with rtnl_lock */
4971static int
4972bnx2_open(struct net_device *dev)
4973{
972ec0d4 4974 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4975 int rc;
4976
1b2f922f
MC
4977 netif_carrier_off(dev);
4978
829ca9a3 4979 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
4980 bnx2_disable_int(bp);
4981
4982 rc = bnx2_alloc_mem(bp);
4983 if (rc)
4984 return rc;
4985
bea3348e
SH
4986 napi_enable(&bp->napi);
4987
8e6a72c4 4988 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
b6016b76
MC
4989 if (pci_enable_msi(bp->pdev) == 0) {
4990 bp->flags |= USING_MSI_FLAG;
8e6a72c4
MC
4991 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4992 bp->flags |= ONE_SHOT_MSI_FLAG;
b6016b76 4993 }
b6016b76 4994 }
8e6a72c4
MC
4995 rc = bnx2_request_irq(bp);
4996
b6016b76 4997 if (rc) {
bea3348e 4998 napi_disable(&bp->napi);
b6016b76
MC
4999 bnx2_free_mem(bp);
5000 return rc;
5001 }
5002
5003 rc = bnx2_init_nic(bp);
5004
5005 if (rc) {
bea3348e 5006 napi_disable(&bp->napi);
8e6a72c4 5007 bnx2_free_irq(bp);
b6016b76
MC
5008 bnx2_free_skbs(bp);
5009 bnx2_free_mem(bp);
5010 return rc;
5011 }
6aa20a22 5012
cd339a0e 5013 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5014
5015 atomic_set(&bp->intr_sem, 0);
5016
5017 bnx2_enable_int(bp);
5018
5019 if (bp->flags & USING_MSI_FLAG) {
5020 /* Test MSI to make sure it is working
5021 * If MSI test fails, go back to INTx mode
5022 */
5023 if (bnx2_test_intr(bp) != 0) {
5024 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5025 " using MSI, switching to INTx mode. Please"
5026 " report this failure to the PCI maintainer"
5027 " and include system chipset information.\n",
5028 bp->dev->name);
5029
5030 bnx2_disable_int(bp);
8e6a72c4 5031 bnx2_free_irq(bp);
b6016b76
MC
5032
5033 rc = bnx2_init_nic(bp);
5034
8e6a72c4
MC
5035 if (!rc)
5036 rc = bnx2_request_irq(bp);
5037
b6016b76 5038 if (rc) {
bea3348e 5039 napi_disable(&bp->napi);
b6016b76
MC
5040 bnx2_free_skbs(bp);
5041 bnx2_free_mem(bp);
5042 del_timer_sync(&bp->timer);
5043 return rc;
5044 }
5045 bnx2_enable_int(bp);
5046 }
5047 }
5048 if (bp->flags & USING_MSI_FLAG) {
5049 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5050 }
5051
5052 netif_start_queue(dev);
5053
5054 return 0;
5055}
5056
5057static void
c4028958 5058bnx2_reset_task(struct work_struct *work)
b6016b76 5059{
c4028958 5060 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 5061
afdc08b9
MC
5062 if (!netif_running(bp->dev))
5063 return;
5064
5065 bp->in_reset_task = 1;
b6016b76
MC
5066 bnx2_netif_stop(bp);
5067
5068 bnx2_init_nic(bp);
5069
5070 atomic_set(&bp->intr_sem, 1);
5071 bnx2_netif_start(bp);
afdc08b9 5072 bp->in_reset_task = 0;
b6016b76
MC
5073}
5074
5075static void
5076bnx2_tx_timeout(struct net_device *dev)
5077{
972ec0d4 5078 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5079
5080 /* This allows the netif to be shutdown gracefully before resetting */
5081 schedule_work(&bp->reset_task);
5082}
5083
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install the new VLAN group and reprogram the RX filters around a
 * brief netif stop/start.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5099
932ff279 5100/* Called with netif_tx_lock.
2f8af120
MC
5101 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5102 * netif_wake_queue().
b6016b76
MC
5103 */
5104static int
5105bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5106{
972ec0d4 5107 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5108 dma_addr_t mapping;
5109 struct tx_bd *txbd;
5110 struct sw_bd *tx_buf;
5111 u32 len, vlan_tag_flags, last_frag, mss;
5112 u16 prod, ring_prod;
5113 int i;
5114
e89bbf10 5115 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
b6016b76
MC
5116 netif_stop_queue(dev);
5117 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5118 dev->name);
5119
5120 return NETDEV_TX_BUSY;
5121 }
5122 len = skb_headlen(skb);
5123 prod = bp->tx_prod;
5124 ring_prod = TX_RING_IDX(prod);
5125
5126 vlan_tag_flags = 0;
84fa7933 5127 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
5128 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5129 }
5130
5131 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5132 vlan_tag_flags |=
5133 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5134 }
fde82055 5135 if ((mss = skb_shinfo(skb)->gso_size)) {
b6016b76 5136 u32 tcp_opt_len, ip_tcp_len;
eddc9ec5 5137 struct iphdr *iph;
b6016b76 5138
b6016b76
MC
5139 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5140
4666f87a
MC
5141 tcp_opt_len = tcp_optlen(skb);
5142
5143 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5144 u32 tcp_off = skb_transport_offset(skb) -
5145 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 5146
4666f87a
MC
5147 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5148 TX_BD_FLAGS_SW_FLAGS;
5149 if (likely(tcp_off == 0))
5150 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5151 else {
5152 tcp_off >>= 3;
5153 vlan_tag_flags |= ((tcp_off & 0x3) <<
5154 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5155 ((tcp_off & 0x10) <<
5156 TX_BD_FLAGS_TCP6_OFF4_SHL);
5157 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5158 }
5159 } else {
5160 if (skb_header_cloned(skb) &&
5161 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5162 dev_kfree_skb(skb);
5163 return NETDEV_TX_OK;
5164 }
b6016b76 5165
4666f87a
MC
5166 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5167
5168 iph = ip_hdr(skb);
5169 iph->check = 0;
5170 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5171 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5172 iph->daddr, 0,
5173 IPPROTO_TCP,
5174 0);
5175 if (tcp_opt_len || (iph->ihl > 5)) {
5176 vlan_tag_flags |= ((iph->ihl - 5) +
5177 (tcp_opt_len >> 2)) << 8;
5178 }
b6016b76 5179 }
4666f87a 5180 } else
b6016b76 5181 mss = 0;
b6016b76
MC
5182
5183 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6aa20a22 5184
b6016b76
MC
5185 tx_buf = &bp->tx_buf_ring[ring_prod];
5186 tx_buf->skb = skb;
5187 pci_unmap_addr_set(tx_buf, mapping, mapping);
5188
5189 txbd = &bp->tx_desc_ring[ring_prod];
5190
5191 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5192 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5193 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5194 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5195
5196 last_frag = skb_shinfo(skb)->nr_frags;
5197
5198 for (i = 0; i < last_frag; i++) {
5199 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5200
5201 prod = NEXT_TX_BD(prod);
5202 ring_prod = TX_RING_IDX(prod);
5203 txbd = &bp->tx_desc_ring[ring_prod];
5204
5205 len = frag->size;
5206 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5207 len, PCI_DMA_TODEVICE);
5208 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5209 mapping, mapping);
5210
5211 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5212 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5213 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5214 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5215
5216 }
5217 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5218
5219 prod = NEXT_TX_BD(prod);
5220 bp->tx_prod_bseq += skb->len;
5221
234754d5
MC
5222 REG_WR16(bp, bp->tx_bidx_addr, prod);
5223 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
5224
5225 mmiowb();
5226
5227 bp->tx_prod = prod;
5228 dev->trans_start = jiffies;
5229
e89bbf10 5230 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
e89bbf10 5231 netif_stop_queue(dev);
2f8af120 5232 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
e89bbf10 5233 netif_wake_queue(dev);
b6016b76
MC
5234 }
5235
5236 return NETDEV_TX_OK;
5237}
5238
5239/* Called with rtnl_lock */
5240static int
5241bnx2_close(struct net_device *dev)
5242{
972ec0d4 5243 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5244 u32 reset_code;
5245
afdc08b9
MC
5246 /* Calling flush_scheduled_work() may deadlock because
5247 * linkwatch_event() may be on the workqueue and it will try to get
5248 * the rtnl_lock which we are holding.
5249 */
5250 while (bp->in_reset_task)
5251 msleep(1);
5252
bea3348e
SH
5253 bnx2_disable_int_sync(bp);
5254 napi_disable(&bp->napi);
b6016b76 5255 del_timer_sync(&bp->timer);
dda1e390 5256 if (bp->flags & NO_WOL_FLAG)
6c4f095e 5257 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
dda1e390 5258 else if (bp->wol)
b6016b76
MC
5259 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5260 else
5261 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5262 bnx2_reset_chip(bp, reset_code);
8e6a72c4 5263 bnx2_free_irq(bp);
b6016b76
MC
5264 bnx2_free_skbs(bp);
5265 bnx2_free_mem(bp);
5266 bp->link_up = 0;
5267 netif_carrier_off(bp->dev);
829ca9a3 5268 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
5269 return 0;
5270}
5271
5272#define GET_NET_STATS64(ctr) \
5273 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5274 (unsigned long) (ctr##_lo)
5275
5276#define GET_NET_STATS32(ctr) \
5277 (ctr##_lo)
5278
5279#if (BITS_PER_LONG == 64)
5280#define GET_NET_STATS GET_NET_STATS64
5281#else
5282#define GET_NET_STATS GET_NET_STATS32
5283#endif
5284
5285static struct net_device_stats *
5286bnx2_get_stats(struct net_device *dev)
5287{
972ec0d4 5288 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5289 struct statistics_block *stats_blk = bp->stats_blk;
5290 struct net_device_stats *net_stats = &bp->net_stats;
5291
5292 if (bp->stats_blk == NULL) {
5293 return net_stats;
5294 }
5295 net_stats->rx_packets =
5296 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5297 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5298 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5299
5300 net_stats->tx_packets =
5301 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5302 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5303 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5304
5305 net_stats->rx_bytes =
5306 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5307
5308 net_stats->tx_bytes =
5309 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5310
6aa20a22 5311 net_stats->multicast =
b6016b76
MC
5312 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5313
6aa20a22 5314 net_stats->collisions =
b6016b76
MC
5315 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5316
6aa20a22 5317 net_stats->rx_length_errors =
b6016b76
MC
5318 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5319 stats_blk->stat_EtherStatsOverrsizePkts);
5320
6aa20a22 5321 net_stats->rx_over_errors =
b6016b76
MC
5322 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5323
6aa20a22 5324 net_stats->rx_frame_errors =
b6016b76
MC
5325 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5326
6aa20a22 5327 net_stats->rx_crc_errors =
b6016b76
MC
5328 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5329
5330 net_stats->rx_errors = net_stats->rx_length_errors +
5331 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5332 net_stats->rx_crc_errors;
5333
5334 net_stats->tx_aborted_errors =
5335 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5336 stats_blk->stat_Dot3StatsLateCollisions);
5337
5b0c76ad
MC
5338 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5339 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
5340 net_stats->tx_carrier_errors = 0;
5341 else {
5342 net_stats->tx_carrier_errors =
5343 (unsigned long)
5344 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5345 }
5346
5347 net_stats->tx_errors =
6aa20a22 5348 (unsigned long)
b6016b76
MC
5349 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5350 +
5351 net_stats->tx_aborted_errors +
5352 net_stats->tx_carrier_errors;
5353
cea94db9
MC
5354 net_stats->rx_missed_errors =
5355 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5356 stats_blk->stat_FwRxDrop);
5357
b6016b76
MC
5358 return net_stats;
5359}
5360
5361/* All ethtool functions called with rtnl_lock */
5362
5363static int
5364bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5365{
972ec0d4 5366 struct bnx2 *bp = netdev_priv(dev);
7b6b8347 5367 int support_serdes = 0, support_copper = 0;
b6016b76
MC
5368
5369 cmd->supported = SUPPORTED_Autoneg;
7b6b8347
MC
5370 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5371 support_serdes = 1;
5372 support_copper = 1;
5373 } else if (bp->phy_port == PORT_FIBRE)
5374 support_serdes = 1;
5375 else
5376 support_copper = 1;
5377
5378 if (support_serdes) {
b6016b76
MC
5379 cmd->supported |= SUPPORTED_1000baseT_Full |
5380 SUPPORTED_FIBRE;
605a9e20
MC
5381 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5382 cmd->supported |= SUPPORTED_2500baseX_Full;
b6016b76 5383
b6016b76 5384 }
7b6b8347 5385 if (support_copper) {
b6016b76
MC
5386 cmd->supported |= SUPPORTED_10baseT_Half |
5387 SUPPORTED_10baseT_Full |
5388 SUPPORTED_100baseT_Half |
5389 SUPPORTED_100baseT_Full |
5390 SUPPORTED_1000baseT_Full |
5391 SUPPORTED_TP;
5392
b6016b76
MC
5393 }
5394
7b6b8347
MC
5395 spin_lock_bh(&bp->phy_lock);
5396 cmd->port = bp->phy_port;
b6016b76
MC
5397 cmd->advertising = bp->advertising;
5398
5399 if (bp->autoneg & AUTONEG_SPEED) {
5400 cmd->autoneg = AUTONEG_ENABLE;
5401 }
5402 else {
5403 cmd->autoneg = AUTONEG_DISABLE;
5404 }
5405
5406 if (netif_carrier_ok(dev)) {
5407 cmd->speed = bp->line_speed;
5408 cmd->duplex = bp->duplex;
5409 }
5410 else {
5411 cmd->speed = -1;
5412 cmd->duplex = -1;
5413 }
7b6b8347 5414 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5415
5416 cmd->transceiver = XCVR_INTERNAL;
5417 cmd->phy_address = bp->phy_addr;
5418
5419 return 0;
5420}
6aa20a22 5421
b6016b76
MC
5422static int
5423bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5424{
972ec0d4 5425 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5426 u8 autoneg = bp->autoneg;
5427 u8 req_duplex = bp->req_duplex;
5428 u16 req_line_speed = bp->req_line_speed;
5429 u32 advertising = bp->advertising;
7b6b8347
MC
5430 int err = -EINVAL;
5431
5432 spin_lock_bh(&bp->phy_lock);
5433
5434 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5435 goto err_out_unlock;
5436
5437 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5438 goto err_out_unlock;
b6016b76
MC
5439
5440 if (cmd->autoneg == AUTONEG_ENABLE) {
5441 autoneg |= AUTONEG_SPEED;
5442
6aa20a22 5443 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
5444
5445 /* allow advertising 1 speed */
5446 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5447 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5448 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5449 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5450
7b6b8347
MC
5451 if (cmd->port == PORT_FIBRE)
5452 goto err_out_unlock;
b6016b76
MC
5453
5454 advertising = cmd->advertising;
5455
27a005b8 5456 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
7b6b8347
MC
5457 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5458 (cmd->port == PORT_TP))
5459 goto err_out_unlock;
5460 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
b6016b76 5461 advertising = cmd->advertising;
7b6b8347
MC
5462 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5463 goto err_out_unlock;
b6016b76 5464 else {
7b6b8347 5465 if (cmd->port == PORT_FIBRE)
b6016b76 5466 advertising = ETHTOOL_ALL_FIBRE_SPEED;
7b6b8347 5467 else
b6016b76 5468 advertising = ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
5469 }
5470 advertising |= ADVERTISED_Autoneg;
5471 }
5472 else {
7b6b8347 5473 if (cmd->port == PORT_FIBRE) {
80be4434
MC
5474 if ((cmd->speed != SPEED_1000 &&
5475 cmd->speed != SPEED_2500) ||
5476 (cmd->duplex != DUPLEX_FULL))
7b6b8347 5477 goto err_out_unlock;
80be4434
MC
5478
5479 if (cmd->speed == SPEED_2500 &&
5480 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
7b6b8347 5481 goto err_out_unlock;
b6016b76 5482 }
7b6b8347
MC
5483 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5484 goto err_out_unlock;
5485
b6016b76
MC
5486 autoneg &= ~AUTONEG_SPEED;
5487 req_line_speed = cmd->speed;
5488 req_duplex = cmd->duplex;
5489 advertising = 0;
5490 }
5491
5492 bp->autoneg = autoneg;
5493 bp->advertising = advertising;
5494 bp->req_line_speed = req_line_speed;
5495 bp->req_duplex = req_duplex;
5496
7b6b8347 5497 err = bnx2_setup_phy(bp, cmd->port);
b6016b76 5498
7b6b8347 5499err_out_unlock:
c770a65c 5500 spin_unlock_bh(&bp->phy_lock);
b6016b76 5501
7b6b8347 5502 return err;
b6016b76
MC
5503}
5504
5505static void
5506bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5507{
972ec0d4 5508 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5509
5510 strcpy(info->driver, DRV_MODULE_NAME);
5511 strcpy(info->version, DRV_MODULE_VERSION);
5512 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 5513 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
5514}
5515
244ac4f4
MC
/* Size of the register dump returned by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5523
5524static void
5525bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5526{
5527 u32 *p = _p, i, offset;
5528 u8 *orig_p = _p;
5529 struct bnx2 *bp = netdev_priv(dev);
5530 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5531 0x0800, 0x0880, 0x0c00, 0x0c10,
5532 0x0c30, 0x0d08, 0x1000, 0x101c,
5533 0x1040, 0x1048, 0x1080, 0x10a4,
5534 0x1400, 0x1490, 0x1498, 0x14f0,
5535 0x1500, 0x155c, 0x1580, 0x15dc,
5536 0x1600, 0x1658, 0x1680, 0x16d8,
5537 0x1800, 0x1820, 0x1840, 0x1854,
5538 0x1880, 0x1894, 0x1900, 0x1984,
5539 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5540 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5541 0x2000, 0x2030, 0x23c0, 0x2400,
5542 0x2800, 0x2820, 0x2830, 0x2850,
5543 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5544 0x3c00, 0x3c94, 0x4000, 0x4010,
5545 0x4080, 0x4090, 0x43c0, 0x4458,
5546 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5547 0x4fc0, 0x5010, 0x53c0, 0x5444,
5548 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5549 0x5fc0, 0x6000, 0x6400, 0x6428,
5550 0x6800, 0x6848, 0x684c, 0x6860,
5551 0x6888, 0x6910, 0x8000 };
5552
5553 regs->version = 0;
5554
5555 memset(p, 0, BNX2_REGDUMP_LEN);
5556
5557 if (!netif_running(bp->dev))
5558 return;
5559
5560 i = 0;
5561 offset = reg_boundaries[0];
5562 p += offset;
5563 while (offset < BNX2_REGDUMP_LEN) {
5564 *p++ = REG_RD(bp, offset);
5565 offset += 4;
5566 if (offset == reg_boundaries[i + 1]) {
5567 offset = reg_boundaries[i + 2];
5568 p = (u32 *) (orig_p + offset);
5569 i += 2;
5570 }
5571 }
5572}
5573
b6016b76
MC
5574static void
5575bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5576{
972ec0d4 5577 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5578
5579 if (bp->flags & NO_WOL_FLAG) {
5580 wol->supported = 0;
5581 wol->wolopts = 0;
5582 }
5583 else {
5584 wol->supported = WAKE_MAGIC;
5585 if (bp->wol)
5586 wol->wolopts = WAKE_MAGIC;
5587 else
5588 wol->wolopts = 0;
5589 }
5590 memset(&wol->sopass, 0, sizeof(wol->sopass));
5591}
5592
5593static int
5594bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5595{
972ec0d4 5596 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5597
5598 if (wol->wolopts & ~WAKE_MAGIC)
5599 return -EINVAL;
5600
5601 if (wol->wolopts & WAKE_MAGIC) {
5602 if (bp->flags & NO_WOL_FLAG)
5603 return -EINVAL;
5604
5605 bp->wol = 1;
5606 }
5607 else {
5608 bp->wol = 0;
5609 }
5610 return 0;
5611}
5612
5613static int
5614bnx2_nway_reset(struct net_device *dev)
5615{
972ec0d4 5616 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5617 u32 bmcr;
5618
5619 if (!(bp->autoneg & AUTONEG_SPEED)) {
5620 return -EINVAL;
5621 }
5622
c770a65c 5623 spin_lock_bh(&bp->phy_lock);
b6016b76 5624
7b6b8347
MC
5625 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5626 int rc;
5627
5628 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5629 spin_unlock_bh(&bp->phy_lock);
5630 return rc;
5631 }
5632
b6016b76
MC
5633 /* Force a link down visible on the other side */
5634 if (bp->phy_flags & PHY_SERDES_FLAG) {
ca58c3af 5635 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
c770a65c 5636 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5637
5638 msleep(20);
5639
c770a65c 5640 spin_lock_bh(&bp->phy_lock);
f8dd064e
MC
5641
5642 bp->current_interval = SERDES_AN_TIMEOUT;
5643 bp->serdes_an_pending = 1;
5644 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5645 }
5646
ca58c3af 5647 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5648 bmcr &= ~BMCR_LOOPBACK;
ca58c3af 5649 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
b6016b76 5650
c770a65c 5651 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5652
5653 return 0;
5654}
5655
5656static int
5657bnx2_get_eeprom_len(struct net_device *dev)
5658{
972ec0d4 5659 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5660
1122db71 5661 if (bp->flash_info == NULL)
b6016b76
MC
5662 return 0;
5663
1122db71 5664 return (int) bp->flash_size;
b6016b76
MC
5665}
5666
5667static int
5668bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5669 u8 *eebuf)
5670{
972ec0d4 5671 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5672 int rc;
5673
1064e944 5674 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
5675
5676 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5677
5678 return rc;
5679}
5680
5681static int
5682bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5683 u8 *eebuf)
5684{
972ec0d4 5685 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5686 int rc;
5687
1064e944 5688 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
5689
5690 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5691
5692 return rc;
5693}
5694
5695static int
5696bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5697{
972ec0d4 5698 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5699
5700 memset(coal, 0, sizeof(struct ethtool_coalesce));
5701
5702 coal->rx_coalesce_usecs = bp->rx_ticks;
5703 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5704 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5705 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5706
5707 coal->tx_coalesce_usecs = bp->tx_ticks;
5708 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5709 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5710 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5711
5712 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5713
5714 return 0;
5715}
5716
5717static int
5718bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5719{
972ec0d4 5720 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5721
5722 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5723 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5724
6aa20a22 5725 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
5726 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5727
5728 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5729 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5730
5731 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5732 if (bp->rx_quick_cons_trip_int > 0xff)
5733 bp->rx_quick_cons_trip_int = 0xff;
5734
5735 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5736 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5737
5738 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5739 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5740
5741 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5742 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5743
5744 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5745 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5746 0xff;
5747
5748 bp->stats_ticks = coal->stats_block_coalesce_usecs;
02537b06
MC
5749 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5750 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5751 bp->stats_ticks = USEC_PER_SEC;
5752 }
7ea6920e
MC
5753 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
5754 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
5755 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
5756
5757 if (netif_running(bp->dev)) {
5758 bnx2_netif_stop(bp);
5759 bnx2_init_nic(bp);
5760 bnx2_netif_start(bp);
5761 }
5762
5763 return 0;
5764}
5765
5766static void
5767bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5768{
972ec0d4 5769 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5770
13daffa2 5771 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76
MC
5772 ering->rx_mini_max_pending = 0;
5773 ering->rx_jumbo_max_pending = 0;
5774
5775 ering->rx_pending = bp->rx_ring_size;
5776 ering->rx_mini_pending = 0;
5777 ering->rx_jumbo_pending = 0;
5778
5779 ering->tx_max_pending = MAX_TX_DESC_CNT;
5780 ering->tx_pending = bp->tx_ring_size;
5781}
5782
5783static int
5784bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5785{
972ec0d4 5786 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5787
13daffa2 5788 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
b6016b76
MC
5789 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5790 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5791
5792 return -EINVAL;
5793 }
13daffa2
MC
5794 if (netif_running(bp->dev)) {
5795 bnx2_netif_stop(bp);
5796 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5797 bnx2_free_skbs(bp);
5798 bnx2_free_mem(bp);
5799 }
5800
5801 bnx2_set_rx_ring_size(bp, ering->rx_pending);
b6016b76
MC
5802 bp->tx_ring_size = ering->tx_pending;
5803
5804 if (netif_running(bp->dev)) {
13daffa2
MC
5805 int rc;
5806
5807 rc = bnx2_alloc_mem(bp);
5808 if (rc)
5809 return rc;
b6016b76
MC
5810 bnx2_init_nic(bp);
5811 bnx2_netif_start(bp);
5812 }
5813
5814 return 0;
5815}
5816
5817static void
5818bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5819{
972ec0d4 5820 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5821
5822 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5823 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5824 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5825}
5826
5827static int
5828bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5829{
972ec0d4 5830 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5831
5832 bp->req_flow_ctrl = 0;
5833 if (epause->rx_pause)
5834 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5835 if (epause->tx_pause)
5836 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5837
5838 if (epause->autoneg) {
5839 bp->autoneg |= AUTONEG_FLOW_CTRL;
5840 }
5841 else {
5842 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5843 }
5844
c770a65c 5845 spin_lock_bh(&bp->phy_lock);
b6016b76 5846
0d8a6571 5847 bnx2_setup_phy(bp, bp->phy_port);
b6016b76 5848
c770a65c 5849 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5850
5851 return 0;
5852}
5853
5854static u32
5855bnx2_get_rx_csum(struct net_device *dev)
5856{
972ec0d4 5857 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5858
5859 return bp->rx_csum;
5860}
5861
5862static int
5863bnx2_set_rx_csum(struct net_device *dev, u32 data)
5864{
972ec0d4 5865 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5866
5867 bp->rx_csum = data;
5868 return 0;
5869}
5870
b11d6213
MC
5871static int
5872bnx2_set_tso(struct net_device *dev, u32 data)
5873{
4666f87a
MC
5874 struct bnx2 *bp = netdev_priv(dev);
5875
5876 if (data) {
b11d6213 5877 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
5878 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5879 dev->features |= NETIF_F_TSO6;
5880 } else
5881 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5882 NETIF_F_TSO_ECN);
b11d6213
MC
5883 return 0;
5884}
5885
cea94db9 5886#define BNX2_NUM_STATS 46
b6016b76 5887
14ab9b86 5888static struct {
b6016b76
MC
5889 char string[ETH_GSTRING_LEN];
5890} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5891 { "rx_bytes" },
5892 { "rx_error_bytes" },
5893 { "tx_bytes" },
5894 { "tx_error_bytes" },
5895 { "rx_ucast_packets" },
5896 { "rx_mcast_packets" },
5897 { "rx_bcast_packets" },
5898 { "tx_ucast_packets" },
5899 { "tx_mcast_packets" },
5900 { "tx_bcast_packets" },
5901 { "tx_mac_errors" },
5902 { "tx_carrier_errors" },
5903 { "rx_crc_errors" },
5904 { "rx_align_errors" },
5905 { "tx_single_collisions" },
5906 { "tx_multi_collisions" },
5907 { "tx_deferred" },
5908 { "tx_excess_collisions" },
5909 { "tx_late_collisions" },
5910 { "tx_total_collisions" },
5911 { "rx_fragments" },
5912 { "rx_jabbers" },
5913 { "rx_undersize_packets" },
5914 { "rx_oversize_packets" },
5915 { "rx_64_byte_packets" },
5916 { "rx_65_to_127_byte_packets" },
5917 { "rx_128_to_255_byte_packets" },
5918 { "rx_256_to_511_byte_packets" },
5919 { "rx_512_to_1023_byte_packets" },
5920 { "rx_1024_to_1522_byte_packets" },
5921 { "rx_1523_to_9022_byte_packets" },
5922 { "tx_64_byte_packets" },
5923 { "tx_65_to_127_byte_packets" },
5924 { "tx_128_to_255_byte_packets" },
5925 { "tx_256_to_511_byte_packets" },
5926 { "tx_512_to_1023_byte_packets" },
5927 { "tx_1024_to_1522_byte_packets" },
5928 { "tx_1523_to_9022_byte_packets" },
5929 { "rx_xon_frames" },
5930 { "rx_xoff_frames" },
5931 { "tx_xon_frames" },
5932 { "tx_xoff_frames" },
5933 { "rx_mac_ctrl_frames" },
5934 { "rx_filtered_packets" },
5935 { "rx_discards" },
cea94db9 5936 { "rx_fw_discards" },
b6016b76
MC
5937};
5938
5939#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5940
f71e1309 5941static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
5942 STATS_OFFSET32(stat_IfHCInOctets_hi),
5943 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5944 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5945 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5946 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5947 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5948 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5949 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5950 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5951 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5952 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
5953 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5954 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5955 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5956 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5957 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5958 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5959 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5960 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5961 STATS_OFFSET32(stat_EtherStatsCollisions),
5962 STATS_OFFSET32(stat_EtherStatsFragments),
5963 STATS_OFFSET32(stat_EtherStatsJabbers),
5964 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5965 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5966 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5967 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5968 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5969 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5970 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5971 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5972 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5973 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5974 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5975 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5976 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5977 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5978 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5979 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5980 STATS_OFFSET32(stat_XonPauseFramesReceived),
5981 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5982 STATS_OFFSET32(stat_OutXonSent),
5983 STATS_OFFSET32(stat_OutXoffSent),
5984 STATS_OFFSET32(stat_MacControlFramesReceived),
5985 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5986 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 5987 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
5988};
5989
5990/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5991 * skipped because of errata.
6aa20a22 5992 */
14ab9b86 5993static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
5994 8,0,8,8,8,8,8,8,8,8,
5995 4,0,4,4,4,4,4,4,4,4,
5996 4,4,4,4,4,4,4,4,4,4,
5997 4,4,4,4,4,4,4,4,4,4,
cea94db9 5998 4,4,4,4,4,4,
b6016b76
MC
5999};
6000
5b0c76ad
MC
6001static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6002 8,0,8,8,8,8,8,8,8,8,
6003 4,4,4,4,4,4,4,4,4,4,
6004 4,4,4,4,4,4,4,4,4,4,
6005 4,4,4,4,4,4,4,4,4,4,
cea94db9 6006 4,4,4,4,4,4,
5b0c76ad
MC
6007};
6008
b6016b76
MC
6009#define BNX2_NUM_TESTS 6
6010
14ab9b86 6011static struct {
b6016b76
MC
6012 char string[ETH_GSTRING_LEN];
6013} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6014 { "register_test (offline)" },
6015 { "memory_test (offline)" },
6016 { "loopback_test (offline)" },
6017 { "nvram_test (online)" },
6018 { "interrupt_test (online)" },
6019 { "link_test (online)" },
6020};
6021
6022static int
b9f2c044 6023bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 6024{
b9f2c044
JG
6025 switch (sset) {
6026 case ETH_SS_TEST:
6027 return BNX2_NUM_TESTS;
6028 case ETH_SS_STATS:
6029 return BNX2_NUM_STATS;
6030 default:
6031 return -EOPNOTSUPP;
6032 }
b6016b76
MC
6033}
6034
6035static void
6036bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6037{
972ec0d4 6038 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6039
6040 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6041 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
6042 int i;
6043
b6016b76
MC
6044 bnx2_netif_stop(bp);
6045 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6046 bnx2_free_skbs(bp);
6047
6048 if (bnx2_test_registers(bp) != 0) {
6049 buf[0] = 1;
6050 etest->flags |= ETH_TEST_FL_FAILED;
6051 }
6052 if (bnx2_test_memory(bp) != 0) {
6053 buf[1] = 1;
6054 etest->flags |= ETH_TEST_FL_FAILED;
6055 }
bc5a0690 6056 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 6057 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76
MC
6058
6059 if (!netif_running(bp->dev)) {
6060 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6061 }
6062 else {
6063 bnx2_init_nic(bp);
6064 bnx2_netif_start(bp);
6065 }
6066
6067 /* wait for link up */
80be4434
MC
6068 for (i = 0; i < 7; i++) {
6069 if (bp->link_up)
6070 break;
6071 msleep_interruptible(1000);
6072 }
b6016b76
MC
6073 }
6074
6075 if (bnx2_test_nvram(bp) != 0) {
6076 buf[3] = 1;
6077 etest->flags |= ETH_TEST_FL_FAILED;
6078 }
6079 if (bnx2_test_intr(bp) != 0) {
6080 buf[4] = 1;
6081 etest->flags |= ETH_TEST_FL_FAILED;
6082 }
6083
6084 if (bnx2_test_link(bp) != 0) {
6085 buf[5] = 1;
6086 etest->flags |= ETH_TEST_FL_FAILED;
6087
6088 }
6089}
6090
6091static void
6092bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6093{
6094 switch (stringset) {
6095 case ETH_SS_STATS:
6096 memcpy(buf, bnx2_stats_str_arr,
6097 sizeof(bnx2_stats_str_arr));
6098 break;
6099 case ETH_SS_TEST:
6100 memcpy(buf, bnx2_tests_str_arr,
6101 sizeof(bnx2_tests_str_arr));
6102 break;
6103 }
6104}
6105
b6016b76
MC
6106static void
6107bnx2_get_ethtool_stats(struct net_device *dev,
6108 struct ethtool_stats *stats, u64 *buf)
6109{
972ec0d4 6110 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6111 int i;
6112 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 6113 u8 *stats_len_arr = NULL;
b6016b76
MC
6114
6115 if (hw_stats == NULL) {
6116 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6117 return;
6118 }
6119
5b0c76ad
MC
6120 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6121 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6122 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6123 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 6124 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
6125 else
6126 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
6127
6128 for (i = 0; i < BNX2_NUM_STATS; i++) {
6129 if (stats_len_arr[i] == 0) {
6130 /* skip this counter */
6131 buf[i] = 0;
6132 continue;
6133 }
6134 if (stats_len_arr[i] == 4) {
6135 /* 4-byte counter */
6136 buf[i] = (u64)
6137 *(hw_stats + bnx2_stats_offset_arr[i]);
6138 continue;
6139 }
6140 /* 8-byte counter */
6141 buf[i] = (((u64) *(hw_stats +
6142 bnx2_stats_offset_arr[i])) << 32) +
6143 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6144 }
6145}
6146
6147static int
6148bnx2_phys_id(struct net_device *dev, u32 data)
6149{
972ec0d4 6150 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6151 int i;
6152 u32 save;
6153
6154 if (data == 0)
6155 data = 2;
6156
6157 save = REG_RD(bp, BNX2_MISC_CFG);
6158 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6159
6160 for (i = 0; i < (data * 2); i++) {
6161 if ((i % 2) == 0) {
6162 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6163 }
6164 else {
6165 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6166 BNX2_EMAC_LED_1000MB_OVERRIDE |
6167 BNX2_EMAC_LED_100MB_OVERRIDE |
6168 BNX2_EMAC_LED_10MB_OVERRIDE |
6169 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6170 BNX2_EMAC_LED_TRAFFIC);
6171 }
6172 msleep_interruptible(500);
6173 if (signal_pending(current))
6174 break;
6175 }
6176 REG_WR(bp, BNX2_EMAC_LED, 0);
6177 REG_WR(bp, BNX2_MISC_CFG, save);
6178 return 0;
6179}
6180
4666f87a
MC
6181static int
6182bnx2_set_tx_csum(struct net_device *dev, u32 data)
6183{
6184 struct bnx2 *bp = netdev_priv(dev);
6185
6186 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 6187 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
6188 else
6189 return (ethtool_op_set_tx_csum(dev, data));
6190}
6191
7282d491 6192static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
6193 .get_settings = bnx2_get_settings,
6194 .set_settings = bnx2_set_settings,
6195 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
6196 .get_regs_len = bnx2_get_regs_len,
6197 .get_regs = bnx2_get_regs,
b6016b76
MC
6198 .get_wol = bnx2_get_wol,
6199 .set_wol = bnx2_set_wol,
6200 .nway_reset = bnx2_nway_reset,
6201 .get_link = ethtool_op_get_link,
6202 .get_eeprom_len = bnx2_get_eeprom_len,
6203 .get_eeprom = bnx2_get_eeprom,
6204 .set_eeprom = bnx2_set_eeprom,
6205 .get_coalesce = bnx2_get_coalesce,
6206 .set_coalesce = bnx2_set_coalesce,
6207 .get_ringparam = bnx2_get_ringparam,
6208 .set_ringparam = bnx2_set_ringparam,
6209 .get_pauseparam = bnx2_get_pauseparam,
6210 .set_pauseparam = bnx2_set_pauseparam,
6211 .get_rx_csum = bnx2_get_rx_csum,
6212 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 6213 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 6214 .set_sg = ethtool_op_set_sg,
b11d6213 6215 .set_tso = bnx2_set_tso,
b6016b76
MC
6216 .self_test = bnx2_self_test,
6217 .get_strings = bnx2_get_strings,
6218 .phys_id = bnx2_phys_id,
b6016b76 6219 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 6220 .get_sset_count = bnx2_get_sset_count,
b6016b76
MC
6221};
6222
6223/* Called with rtnl_lock */
6224static int
6225bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6226{
14ab9b86 6227 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 6228 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6229 int err;
6230
6231 switch(cmd) {
6232 case SIOCGMIIPHY:
6233 data->phy_id = bp->phy_addr;
6234
6235 /* fallthru */
6236 case SIOCGMIIREG: {
6237 u32 mii_regval;
6238
7b6b8347
MC
6239 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6240 return -EOPNOTSUPP;
6241
dad3e452
MC
6242 if (!netif_running(dev))
6243 return -EAGAIN;
6244
c770a65c 6245 spin_lock_bh(&bp->phy_lock);
b6016b76 6246 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 6247 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6248
6249 data->val_out = mii_regval;
6250
6251 return err;
6252 }
6253
6254 case SIOCSMIIREG:
6255 if (!capable(CAP_NET_ADMIN))
6256 return -EPERM;
6257
7b6b8347
MC
6258 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6259 return -EOPNOTSUPP;
6260
dad3e452
MC
6261 if (!netif_running(dev))
6262 return -EAGAIN;
6263
c770a65c 6264 spin_lock_bh(&bp->phy_lock);
b6016b76 6265 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 6266 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6267
6268 return err;
6269
6270 default:
6271 /* do nothing */
6272 break;
6273 }
6274 return -EOPNOTSUPP;
6275}
6276
6277/* Called with rtnl_lock */
6278static int
6279bnx2_change_mac_addr(struct net_device *dev, void *p)
6280{
6281 struct sockaddr *addr = p;
972ec0d4 6282 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6283
73eef4cd
MC
6284 if (!is_valid_ether_addr(addr->sa_data))
6285 return -EINVAL;
6286
b6016b76
MC
6287 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6288 if (netif_running(dev))
6289 bnx2_set_mac_addr(bp);
6290
6291 return 0;
6292}
6293
6294/* Called with rtnl_lock */
6295static int
6296bnx2_change_mtu(struct net_device *dev, int new_mtu)
6297{
972ec0d4 6298 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6299
6300 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6301 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6302 return -EINVAL;
6303
6304 dev->mtu = new_mtu;
6305 if (netif_running(dev)) {
6306 bnx2_netif_stop(bp);
6307
6308 bnx2_init_nic(bp);
6309
6310 bnx2_netif_start(bp);
6311 }
6312 return 0;
6313}
6314
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the IRQ line masked. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6326
253c8b75
MC
6327static void __devinit
6328bnx2_get_5709_media(struct bnx2 *bp)
6329{
6330 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6331 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6332 u32 strap;
6333
6334 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6335 return;
6336 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6337 bp->phy_flags |= PHY_SERDES_FLAG;
6338 return;
6339 }
6340
6341 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6342 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6343 else
6344 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6345
6346 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6347 switch (strap) {
6348 case 0x4:
6349 case 0x5:
6350 case 0x6:
6351 bp->phy_flags |= PHY_SERDES_FLAG;
6352 return;
6353 }
6354 } else {
6355 switch (strap) {
6356 case 0x1:
6357 case 0x2:
6358 case 0x4:
6359 bp->phy_flags |= PHY_SERDES_FLAG;
6360 return;
6361 }
6362 }
6363}
6364
883e5151
MC
6365static void __devinit
6366bnx2_get_pci_speed(struct bnx2 *bp)
6367{
6368 u32 reg;
6369
6370 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6371 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6372 u32 clkreg;
6373
6374 bp->flags |= PCIX_FLAG;
6375
6376 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6377
6378 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6379 switch (clkreg) {
6380 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6381 bp->bus_speed_mhz = 133;
6382 break;
6383
6384 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6385 bp->bus_speed_mhz = 100;
6386 break;
6387
6388 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6389 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6390 bp->bus_speed_mhz = 66;
6391 break;
6392
6393 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6394 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6395 bp->bus_speed_mhz = 50;
6396 break;
6397
6398 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6399 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6400 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6401 bp->bus_speed_mhz = 33;
6402 break;
6403 }
6404 }
6405 else {
6406 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6407 bp->bus_speed_mhz = 66;
6408 else
6409 bp->bus_speed_mhz = 33;
6410 }
6411
6412 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6413 bp->flags |= PCI_32BIT_FLAG;
6414
6415}
6416
b6016b76
MC
6417static int __devinit
6418bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6419{
6420 struct bnx2 *bp;
6421 unsigned long mem_len;
58fc2ea4 6422 int rc, i, j;
b6016b76 6423 u32 reg;
40453c83 6424 u64 dma_mask, persist_dma_mask;
b6016b76 6425
b6016b76 6426 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6427 bp = netdev_priv(dev);
b6016b76
MC
6428
6429 bp->flags = 0;
6430 bp->phy_flags = 0;
6431
6432 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6433 rc = pci_enable_device(pdev);
6434 if (rc) {
898eb71c 6435 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
6436 goto err_out;
6437 }
6438
6439 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6440 dev_err(&pdev->dev,
2e8a538d 6441 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6442 rc = -ENODEV;
6443 goto err_out_disable;
6444 }
6445
6446 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6447 if (rc) {
9b91cf9d 6448 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6449 goto err_out_disable;
6450 }
6451
6452 pci_set_master(pdev);
6453
6454 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6455 if (bp->pm_cap == 0) {
9b91cf9d 6456 dev_err(&pdev->dev,
2e8a538d 6457 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6458 rc = -EIO;
6459 goto err_out_release;
6460 }
6461
b6016b76
MC
6462 bp->dev = dev;
6463 bp->pdev = pdev;
6464
6465 spin_lock_init(&bp->phy_lock);
1b8227c4 6466 spin_lock_init(&bp->indirect_lock);
c4028958 6467 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6468
6469 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6470 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6471 dev->mem_end = dev->mem_start + mem_len;
6472 dev->irq = pdev->irq;
6473
6474 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6475
6476 if (!bp->regview) {
9b91cf9d 6477 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6478 rc = -ENOMEM;
6479 goto err_out_release;
6480 }
6481
6482 /* Configure byte swap and enable write to the reg_window registers.
6483 * Rely on CPU to do target byte swapping on big endian systems
6484 * The chip's target access swapping will not swap all accesses
6485 */
6486 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6487 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6488 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6489
829ca9a3 6490 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6491
6492 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6493
883e5151
MC
6494 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6495 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6496 dev_err(&pdev->dev,
6497 "Cannot find PCIE capability, aborting.\n");
6498 rc = -EIO;
6499 goto err_out_unmap;
6500 }
6501 bp->flags |= PCIE_FLAG;
6502 } else {
59b47d8a
MC
6503 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6504 if (bp->pcix_cap == 0) {
6505 dev_err(&pdev->dev,
6506 "Cannot find PCIX capability, aborting.\n");
6507 rc = -EIO;
6508 goto err_out_unmap;
6509 }
6510 }
6511
8e6a72c4
MC
6512 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6513 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6514 bp->flags |= MSI_CAP_FLAG;
6515 }
6516
40453c83
MC
6517 /* 5708 cannot support DMA addresses > 40-bit. */
6518 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6519 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6520 else
6521 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6522
6523 /* Configure DMA attributes. */
6524 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6525 dev->features |= NETIF_F_HIGHDMA;
6526 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6527 if (rc) {
6528 dev_err(&pdev->dev,
6529 "pci_set_consistent_dma_mask failed, aborting.\n");
6530 goto err_out_unmap;
6531 }
6532 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6533 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6534 goto err_out_unmap;
6535 }
6536
883e5151
MC
6537 if (!(bp->flags & PCIE_FLAG))
6538 bnx2_get_pci_speed(bp);
b6016b76
MC
6539
6540 /* 5706A0 may falsely detect SERR and PERR. */
6541 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6542 reg = REG_RD(bp, PCI_COMMAND);
6543 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6544 REG_WR(bp, PCI_COMMAND, reg);
6545 }
6546 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6547 !(bp->flags & PCIX_FLAG)) {
6548
9b91cf9d 6549 dev_err(&pdev->dev,
2e8a538d 6550 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6551 goto err_out_unmap;
6552 }
6553
6554 bnx2_init_nvram(bp);
6555
e3648b3d
MC
6556 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6557
6558 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6559 BNX2_SHM_HDR_SIGNATURE_SIG) {
6560 u32 off = PCI_FUNC(pdev->devfn) << 2;
6561
6562 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6563 } else
e3648b3d
MC
6564 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6565
b6016b76
MC
6566 /* Get the permanent MAC address. First we need to make sure the
6567 * firmware is actually running.
6568 */
e3648b3d 6569 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6570
6571 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6572 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6573 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6574 rc = -ENODEV;
6575 goto err_out_unmap;
6576 }
6577
58fc2ea4
MC
6578 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6579 for (i = 0, j = 0; i < 3; i++) {
6580 u8 num, k, skip0;
6581
6582 num = (u8) (reg >> (24 - (i * 8)));
6583 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6584 if (num >= k || !skip0 || k == 1) {
6585 bp->fw_version[j++] = (num / k) + '0';
6586 skip0 = 0;
6587 }
6588 }
6589 if (i != 2)
6590 bp->fw_version[j++] = '.';
6591 }
846f5c62
MC
6592 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6593 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6594 bp->wol = 1;
6595
6596 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
c2d3db8c
MC
6597 bp->flags |= ASF_ENABLE_FLAG;
6598
6599 for (i = 0; i < 30; i++) {
6600 reg = REG_RD_IND(bp, bp->shmem_base +
6601 BNX2_BC_STATE_CONDITION);
6602 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6603 break;
6604 msleep(10);
6605 }
6606 }
58fc2ea4
MC
6607 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6608 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6609 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6610 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6611 int i;
6612 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6613
6614 bp->fw_version[j++] = ' ';
6615 for (i = 0; i < 3; i++) {
6616 reg = REG_RD_IND(bp, addr + i * 4);
6617 reg = swab32(reg);
6618 memcpy(&bp->fw_version[j], &reg, 4);
6619 j += 4;
6620 }
6621 }
b6016b76 6622
e3648b3d 6623 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6624 bp->mac_addr[0] = (u8) (reg >> 8);
6625 bp->mac_addr[1] = (u8) reg;
6626
e3648b3d 6627 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6628 bp->mac_addr[2] = (u8) (reg >> 24);
6629 bp->mac_addr[3] = (u8) (reg >> 16);
6630 bp->mac_addr[4] = (u8) (reg >> 8);
6631 bp->mac_addr[5] = (u8) reg;
6632
6633 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6634 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6635
6636 bp->rx_csum = 1;
6637
6638 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6639
6640 bp->tx_quick_cons_trip_int = 20;
6641 bp->tx_quick_cons_trip = 20;
6642 bp->tx_ticks_int = 80;
6643 bp->tx_ticks = 80;
6aa20a22 6644
b6016b76
MC
6645 bp->rx_quick_cons_trip_int = 6;
6646 bp->rx_quick_cons_trip = 6;
6647 bp->rx_ticks_int = 18;
6648 bp->rx_ticks = 18;
6649
7ea6920e 6650 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6651
6652 bp->timer_interval = HZ;
cd339a0e 6653 bp->current_interval = HZ;
b6016b76 6654
5b0c76ad
MC
6655 bp->phy_addr = 1;
6656
b6016b76 6657 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6658 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6659 bnx2_get_5709_media(bp);
6660 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6661 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6662
0d8a6571 6663 bp->phy_port = PORT_TP;
bac0dff6 6664 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6665 bp->phy_port = PORT_FIBRE;
846f5c62
MC
6666 reg = REG_RD_IND(bp, bp->shmem_base +
6667 BNX2_SHARED_HW_CFG_CONFIG);
6668 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6669 bp->flags |= NO_WOL_FLAG;
6670 bp->wol = 0;
6671 }
bac0dff6 6672 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 6673 bp->phy_addr = 2;
5b0c76ad
MC
6674 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6675 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6676 }
0d8a6571
MC
6677 bnx2_init_remote_phy(bp);
6678
261dd5ca
MC
6679 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6680 CHIP_NUM(bp) == CHIP_NUM_5708)
6681 bp->phy_flags |= PHY_CRC_FIX_FLAG;
fb0c18bd
MC
6682 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6683 (CHIP_REV(bp) == CHIP_REV_Ax ||
6684 CHIP_REV(bp) == CHIP_REV_Bx))
b659f44e 6685 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 6686
16088272
MC
6687 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6688 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
846f5c62 6689 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
dda1e390 6690 bp->flags |= NO_WOL_FLAG;
846f5c62
MC
6691 bp->wol = 0;
6692 }
dda1e390 6693
b6016b76
MC
6694 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6695 bp->tx_quick_cons_trip_int =
6696 bp->tx_quick_cons_trip;
6697 bp->tx_ticks_int = bp->tx_ticks;
6698 bp->rx_quick_cons_trip_int =
6699 bp->rx_quick_cons_trip;
6700 bp->rx_ticks_int = bp->rx_ticks;
6701 bp->comp_prod_trip_int = bp->comp_prod_trip;
6702 bp->com_ticks_int = bp->com_ticks;
6703 bp->cmd_ticks_int = bp->cmd_ticks;
6704 }
6705
f9317a40
MC
6706 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6707 *
6708 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6709 * with byte enables disabled on the unused 32-bit word. This is legal
6710 * but causes problems on the AMD 8132 which will eventually stop
6711 * responding after a while.
6712 *
6713 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 6714 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
6715 */
6716 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6717 struct pci_dev *amd_8132 = NULL;
6718
6719 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6720 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6721 amd_8132))) {
f9317a40 6722
44c10138
AK
6723 if (amd_8132->revision >= 0x10 &&
6724 amd_8132->revision <= 0x13) {
f9317a40
MC
6725 disable_msi = 1;
6726 pci_dev_put(amd_8132);
6727 break;
6728 }
6729 }
6730 }
6731
deaf391b 6732 bnx2_set_default_link(bp);
b6016b76
MC
6733 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6734
cd339a0e
MC
6735 init_timer(&bp->timer);
6736 bp->timer.expires = RUN_AT(bp->timer_interval);
6737 bp->timer.data = (unsigned long) bp;
6738 bp->timer.function = bnx2_timer;
6739
b6016b76
MC
6740 return 0;
6741
6742err_out_unmap:
6743 if (bp->regview) {
6744 iounmap(bp->regview);
73eef4cd 6745 bp->regview = NULL;
b6016b76
MC
6746 }
6747
6748err_out_release:
6749 pci_release_regions(pdev);
6750
6751err_out_disable:
6752 pci_disable_device(pdev);
6753 pci_set_drvdata(pdev, NULL);
6754
6755err_out:
6756 return rc;
6757}
6758
883e5151
MC
6759static char * __devinit
6760bnx2_bus_string(struct bnx2 *bp, char *str)
6761{
6762 char *s = str;
6763
6764 if (bp->flags & PCIE_FLAG) {
6765 s += sprintf(s, "PCI Express");
6766 } else {
6767 s += sprintf(s, "PCI");
6768 if (bp->flags & PCIX_FLAG)
6769 s += sprintf(s, "-X");
6770 if (bp->flags & PCI_32BIT_FLAG)
6771 s += sprintf(s, " 32-bit");
6772 else
6773 s += sprintf(s, " 64-bit");
6774 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6775 }
6776 return str;
6777}
6778
b6016b76
MC
/* PCI probe entry point.  Allocates the net_device, runs board-level
 * initialization, wires up the netdev operations and feature flags, and
 * registers the interface with the networking core.
 *
 * Returns 0 on success or a negative errno; on any failure everything
 * allocated here (and by bnx2_init_board()) is released before returning.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	/* Hook up the netdev operations before registration. */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	netif_napi_add(dev, &bp->napi, bnx2_poll, 64);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* Only the 5709 can do IPv6 checksum offload and TSO6. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Unwind everything bnx2_init_board() set up. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
6866
/* PCI remove handler: tear down the netdev and release all resources
 * acquired during probe, in reverse acquisition order.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure a queued reset_task cannot run against a dying device. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6885
/* PCI suspend handler.  Saves config space, quiesces the NIC, tells the
 * firmware which unload/suspend mode to use (based on Wake-on-LAN
 * settings), and drops into the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and all traffic before resetting the chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Choose the firmware reset code from the WoL configuration. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6916
/* PCI resume handler: restore config space, power the chip back to D0,
 * and re-initialize/restart the NIC if it was running at suspend time.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init: the chip was reset on suspend and lost all state. */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6933
/* Registration table hooking the driver into the PCI core: supported
 * device IDs, probe/remove, and power-management callbacks.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6942
6943static int __init bnx2_init(void)
6944{
29917620 6945 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
6946}
6947
/* Module exit point: unregistering from the PCI core detaches and
 * removes every bound device via bnx2_remove_one().
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6952
/* Standard module load/unload hooks. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6955
6956
6957
This page took 0.777813 seconds and 5 git commands to generate.