[BNX2]: Move rx indexes into bnx2_napi struct.
[deliverable/linux.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76 54
110d0ef9 55#define FW_BUF_SIZE 0x10000
b3448b0b 56
b6016b76
MC
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
a0d142c6
MC
59#define DRV_MODULE_VERSION "1.7.0"
60#define DRV_MODULE_RELDATE "December 11, 2007"
b6016b76
MC
61
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
e19360f2 67static const char version[] __devinitdata =
b6016b76
MC
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 71MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
72MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
5b0c76ad
MC
86 BCM5708,
87 BCM5708S,
bac0dff6 88 BCM5709,
27a005b8 89 BCM5709S,
b6016b76
MC
90} board_t;
91
92/* indexed by board_t, above */
f71e1309 93static const struct {
b6016b76
MC
94 char *name;
95} board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
b6016b76
MC
105 };
106
107static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
b6016b76
MC
126 { 0, }
127};
128
129static struct flash_spec flash_table[] =
130{
e30372c9
MC
131#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 133 /* Slow EEPROM */
37137709 134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137 "EEPROM - slow"},
37137709
MC
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142 "Entry 0001"},
b6016b76
MC
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
37137709 145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
37137709 151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
37137709
MC
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
176 /* Fast EEPROM */
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1101"},
206 /* Ateml Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
b6016b76
MC
216};
217
e30372c9
MC
218static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
225};
226
b6016b76
MC
227MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
a550c99b 229static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_napi *bnapi)
e89bbf10 230{
2f8af120 231 u32 diff;
e89bbf10 232
2f8af120 233 smp_mb();
faac9c4b
MC
234
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
237 */
a550c99b 238 diff = bp->tx_prod - bnapi->tx_cons;
faac9c4b
MC
239 if (unlikely(diff >= TX_DESC_CNT)) {
240 diff &= 0xffff;
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
243 }
e89bbf10
MC
244 return (bp->tx_ring_size - diff);
245}
246
b6016b76
MC
247static u32
248bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249{
1b8227c4
MC
250 u32 val;
251
252 spin_lock_bh(&bp->indirect_lock);
b6016b76 253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
256 return val;
b6016b76
MC
257}
258
/* Write a device register through the PCI config "window".
 *
 * indirect_lock serializes the address-write/data-write pair against
 * other users of the shared window registers.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268static void
269bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270{
271 offset += cid_addr;
1b8227c4 272 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
1b8227c4 290 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
291}
292
/* Read a PHY register over MDIO.
 *
 * @reg: PHY register number
 * @val: out-parameter for the value read; set to 0 on timeout
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction does not
 * complete within ~500us.  If hardware auto-polling is enabled it is
 * temporarily disabled around the transaction and re-enabled after.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend auto-polling so it doesn't race our transaction. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO read command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 x 10us. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
349
/* Write a PHY register over MDIO.
 *
 * @reg: PHY register number
 * @val: value to write (low 16 bits)
 *
 * Returns 0 on success, -EBUSY on transaction timeout (~500us).
 * Mirrors bnx2_read_phy(): auto-polling, if active, is suspended
 * around the transaction and re-enabled afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Suspend hardware auto-polling during the transaction. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the MDIO write command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion: up to 50 x 10us. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
398
/* Mask device interrupts.  The read-back flushes the posted PCI write
 * so the mask is guaranteed to have reached the chip on return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask device interrupts and acknowledge up to the last status index
 * seen by the NAPI handler, then force a coalescing pass so a pending
 * event isn't left unserviced.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	/* First ack with the mask bit still set ... */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bnapi->last_status_idx);

	/* ... then unmask at the same status index. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx);

	/* Trigger an immediate coalescing cycle. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
421
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR sees the disable in progress;
 * bnx2_netif_start() decrements it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
429
35efa7c1
MC
/* Stop NAPI polling on the (single) bnx2_napi context. */
static void
bnx2_napi_disable(struct bnx2 *bp)
{
	napi_disable(&bp->bnx2_napi.napi);
}
435
/* Re-enable NAPI polling on the (single) bnx2_napi context. */
static void
bnx2_napi_enable(struct bnx2 *bp)
{
	napi_enable(&bp->bnx2_napi.napi);
}
441
b6016b76
MC
/* Quiesce the interface: mask interrupts synchronously, stop NAPI and
 * the TX queue.  Counterpart of bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
452
/* Undo bnx2_netif_stop().  Only the call that balances the last
 * outstanding bnx2_disable_int_sync() (intr_sem drops to 0) actually
 * restarts the queue, NAPI and interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
464
465static void
466bnx2_free_mem(struct bnx2 *bp)
467{
13daffa2
MC
468 int i;
469
59b47d8a
MC
470 for (i = 0; i < bp->ctx_pages; i++) {
471 if (bp->ctx_blk[i]) {
472 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
473 bp->ctx_blk[i],
474 bp->ctx_blk_mapping[i]);
475 bp->ctx_blk[i] = NULL;
476 }
477 }
b6016b76 478 if (bp->status_blk) {
0f31f994 479 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
480 bp->status_blk, bp->status_blk_mapping);
481 bp->status_blk = NULL;
0f31f994 482 bp->stats_blk = NULL;
b6016b76
MC
483 }
484 if (bp->tx_desc_ring) {
e343d55c 485 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
b6016b76
MC
486 bp->tx_desc_ring, bp->tx_desc_mapping);
487 bp->tx_desc_ring = NULL;
488 }
b4558ea9
JJ
489 kfree(bp->tx_buf_ring);
490 bp->tx_buf_ring = NULL;
13daffa2
MC
491 for (i = 0; i < bp->rx_max_ring; i++) {
492 if (bp->rx_desc_ring[i])
e343d55c 493 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
13daffa2
MC
494 bp->rx_desc_ring[i],
495 bp->rx_desc_mapping[i]);
496 bp->rx_desc_ring[i] = NULL;
497 }
498 vfree(bp->rx_buf_ring);
b4558ea9 499 bp->rx_buf_ring = NULL;
47bf4246
MC
500 for (i = 0; i < bp->rx_max_pg_ring; i++) {
501 if (bp->rx_pg_desc_ring[i])
502 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
503 bp->rx_pg_desc_ring[i],
504 bp->rx_pg_desc_mapping[i]);
505 bp->rx_pg_desc_ring[i] = NULL;
506 }
507 if (bp->rx_pg_ring)
508 vfree(bp->rx_pg_ring);
509 bp->rx_pg_ring = NULL;
b6016b76
MC
510}
511
/* Allocate all host and DMA memory for the device: TX ring (shadow +
 * descriptors), RX rings, optional RX page rings, the combined
 * status/statistics block, and (5709 only) context memory pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via the alloc_mem_err goto-cleanup path.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	/* Host-side shadow ring for TX skb bookkeeping. */
	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	/* DMA-coherent TX descriptor ring. */
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* Host-side RX shadow rings (vmalloc: can be large). */
	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Optional RX page rings (used for jumbo/paged receive). */
	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* The NAPI context reads status through its own pointer. */
	bp->bnx2_napi.status_blk = bp->status_blk;

	/* Statistics block lives right after the cache-aligned status block. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	/* 5709 needs on-host context memory (0x2000 bytes in page chunks). */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	/* bnx2_free_mem() copes with a partially-allocated state. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
597
e3648b3d
MC
/* Report the current link state (speed/duplex/autoneg result) to the
 * bootcode firmware via the shared-memory LINK_STATUS word.
 * Skipped entirely when the PHY is managed remotely.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode speed+duplex into the firmware's format. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches low; read twice for current state. */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
656
9b1084b8
MC
657static char *
658bnx2_xceiver_str(struct bnx2 *bp)
659{
660 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
661 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
662 "Copper"));
663}
664
b6016b76
MC
/* Log the link state change, update the carrier flag on the netdev,
 * and propagate the state to firmware via bnx2_report_fw_link().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Describe the negotiated flow-control configuration. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
701
/* Resolve the effective flow-control setting (bp->flow_ctrl) from the
 * requested configuration and, when both speed and flow-control are
 * autonegotiated, from the local/remote pause advertisements.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* Without full autoneg, honor the user-requested setting. */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful in full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports resolved pause directly in a status reg. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* SerDes uses 1000X pause bits; translate to copper-style bits so
	 * the resolution table below works for both media.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
777
27a005b8
MC
/* Record link-up state for the 5709 SerDes PHY: read the negotiated
 * speed/duplex from the GP_STATUS block (or use the forced settings
 * when autoneg is off).  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Switch to the GP_STATUS register block, read, switch back. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		/* Forced mode: report what was requested. */
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
816
b6016b76 817static int
5b0c76ad
MC
818bnx2_5708s_linkup(struct bnx2 *bp)
819{
820 u32 val;
821
822 bp->link_up = 1;
823 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
824 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
825 case BCM5708S_1000X_STAT1_SPEED_10:
826 bp->line_speed = SPEED_10;
827 break;
828 case BCM5708S_1000X_STAT1_SPEED_100:
829 bp->line_speed = SPEED_100;
830 break;
831 case BCM5708S_1000X_STAT1_SPEED_1G:
832 bp->line_speed = SPEED_1000;
833 break;
834 case BCM5708S_1000X_STAT1_SPEED_2G5:
835 bp->line_speed = SPEED_2500;
836 break;
837 }
838 if (val & BCM5708S_1000X_STAT1_FD)
839 bp->duplex = DUPLEX_FULL;
840 else
841 bp->duplex = DUPLEX_HALF;
842
843 return 0;
844}
845
/* Record link-up state for the 5706 SerDes PHY.  SerDes link is always
 * 1000 Mbps; only duplex needs resolving, from BMCR when forced or
 * from the common local/remote 1000X advertisement when autoneg is on.
 * Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR already gave us the answer. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Duplex is the highest common capability of both ends. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
882
/* Resolve speed/duplex for a copper PHY after link-up.
 *
 * Autoneg: check 1000BASE-T first (MII_STAT1000 LP bits align with
 * MII_CTRL1000 after a >>2 shift), then fall back to the 10/100
 * advertisement registers; no common ability means link is reported
 * down.  Forced mode: decode BMCR directly.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Align LP 1000BT bits with our advertisement bits. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; resolve from 10/100 ability. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: BMCR holds the configuration. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
948
/* Program the EMAC for the resolved link parameters: inter-packet gap,
 * port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; 1000HD needs a larger IPG (0x26ff). */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					/* 5708/5709 have a dedicated 10M mode. */
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1015
27a005b8
MC
/* On 5709 SerDes, BMSR1 lives in the GP_STATUS block: select it before
 * reading.  No-op on other chips/media.  Pair with bnx2_disable_bmsr1().
 */
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}
1024
/* Restore the default COMBO_IEEEB0 register block after a BMSR1 access
 * on 5709 SerDes.  No-op on other chips/media.
 */
static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
1033
605a9e20
MC
/* Enable 2.5G advertisement in the PHY's UP1 register if not already
 * set.  Returns 1 if it was already enabled (or the PHY isn't 2.5G
 * capable), 0 if this call had to turn it on.  On 5709 the UP1
 * register sits in the OVER1G block, selected around the access.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1062
/* Disable 2.5G advertisement in the PHY's UP1 register if currently
 * set.  Returns 1 if this call turned it off, 0 if it was already off
 * (or the PHY isn't 2.5G capable).  On 5709 the UP1 register sits in
 * the OVER1G block, selected around the access.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1088
1089static void
1090bnx2_enable_forced_2g5(struct bnx2 *bp)
1091{
1092 u32 bmcr;
1093
1094 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095 return;
1096
27a005b8
MC
1097 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098 u32 val;
1099
1100 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1101 MII_BNX2_BLK_ADDR_SERDES_DIG);
1102 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1103 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1104 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1105 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1106
1107 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1108 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110
1111 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1112 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1113 bmcr |= BCM5708S_BMCR_FORCE_2500;
1114 }
1115
1116 if (bp->autoneg & AUTONEG_SPEED) {
1117 bmcr &= ~BMCR_ANENABLE;
1118 if (bp->req_duplex == DUPLEX_FULL)
1119 bmcr |= BMCR_FULLDPLX;
1120 }
1121 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1122}
1123
1124static void
1125bnx2_disable_forced_2g5(struct bnx2 *bp)
1126{
1127 u32 bmcr;
1128
1129 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1130 return;
1131
27a005b8
MC
1132 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1133 u32 val;
1134
1135 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1136 MII_BNX2_BLK_ADDR_SERDES_DIG);
1137 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1138 val &= ~MII_BNX2_SD_MISC1_FORCE;
1139 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1146 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1148 }
1149
1150 if (bp->autoneg & AUTONEG_SPEED)
1151 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1152 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1153}
1154
b6016b76
MC
1155static int
1156bnx2_set_link(struct bnx2 *bp)
1157{
1158 u32 bmsr;
1159 u8 link_up;
1160
80be4434 1161 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
1162 bp->link_up = 1;
1163 return 0;
1164 }
1165
0d8a6571
MC
1166 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1167 return 0;
1168
b6016b76
MC
1169 link_up = bp->link_up;
1170
27a005b8
MC
1171 bnx2_enable_bmsr1(bp);
1172 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1173 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1174 bnx2_disable_bmsr1(bp);
b6016b76
MC
1175
1176 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1177 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1178 u32 val;
1179
1180 val = REG_RD(bp, BNX2_EMAC_STATUS);
1181 if (val & BNX2_EMAC_STATUS_LINK)
1182 bmsr |= BMSR_LSTATUS;
1183 else
1184 bmsr &= ~BMSR_LSTATUS;
1185 }
1186
1187 if (bmsr & BMSR_LSTATUS) {
1188 bp->link_up = 1;
1189
1190 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1191 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1192 bnx2_5706s_linkup(bp);
1193 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1194 bnx2_5708s_linkup(bp);
27a005b8
MC
1195 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1196 bnx2_5709s_linkup(bp);
b6016b76
MC
1197 }
1198 else {
1199 bnx2_copper_linkup(bp);
1200 }
1201 bnx2_resolve_flow_ctrl(bp);
1202 }
1203 else {
1204 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
605a9e20
MC
1205 (bp->autoneg & AUTONEG_SPEED))
1206 bnx2_disable_forced_2g5(bp);
b6016b76 1207
b6016b76
MC
1208 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1209 bp->link_up = 0;
1210 }
1211
1212 if (bp->link_up != link_up) {
1213 bnx2_report_link(bp);
1214 }
1215
1216 bnx2_set_mac_link(bp);
1217
1218 return 0;
1219}
1220
1221static int
1222bnx2_reset_phy(struct bnx2 *bp)
1223{
1224 int i;
1225 u32 reg;
1226
ca58c3af 1227 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1228
1229#define PHY_RESET_MAX_WAIT 100
1230 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1231 udelay(10);
1232
ca58c3af 1233 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1234 if (!(reg & BMCR_RESET)) {
1235 udelay(20);
1236 break;
1237 }
1238 }
1239 if (i == PHY_RESET_MAX_WAIT) {
1240 return -EBUSY;
1241 }
1242 return 0;
1243}
1244
1245static u32
1246bnx2_phy_get_pause_adv(struct bnx2 *bp)
1247{
1248 u32 adv = 0;
1249
1250 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1251 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1252
1253 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254 adv = ADVERTISE_1000XPAUSE;
1255 }
1256 else {
1257 adv = ADVERTISE_PAUSE_CAP;
1258 }
1259 }
1260 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1261 if (bp->phy_flags & PHY_SERDES_FLAG) {
1262 adv = ADVERTISE_1000XPSE_ASYM;
1263 }
1264 else {
1265 adv = ADVERTISE_PAUSE_ASYM;
1266 }
1267 }
1268 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1269 if (bp->phy_flags & PHY_SERDES_FLAG) {
1270 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1271 }
1272 else {
1273 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1274 }
1275 }
1276 return adv;
1277}
1278
0d8a6571
MC
1279static int bnx2_fw_sync(struct bnx2 *, u32, int);
1280
b6016b76 1281static int
0d8a6571
MC
1282bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1283{
1284 u32 speed_arg = 0, pause_adv;
1285
1286 pause_adv = bnx2_phy_get_pause_adv(bp);
1287
1288 if (bp->autoneg & AUTONEG_SPEED) {
1289 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1290 if (bp->advertising & ADVERTISED_10baseT_Half)
1291 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1292 if (bp->advertising & ADVERTISED_10baseT_Full)
1293 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1294 if (bp->advertising & ADVERTISED_100baseT_Half)
1295 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296 if (bp->advertising & ADVERTISED_100baseT_Full)
1297 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1298 if (bp->advertising & ADVERTISED_1000baseT_Full)
1299 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1300 if (bp->advertising & ADVERTISED_2500baseX_Full)
1301 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1302 } else {
1303 if (bp->req_line_speed == SPEED_2500)
1304 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1305 else if (bp->req_line_speed == SPEED_1000)
1306 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1307 else if (bp->req_line_speed == SPEED_100) {
1308 if (bp->req_duplex == DUPLEX_FULL)
1309 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1310 else
1311 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1312 } else if (bp->req_line_speed == SPEED_10) {
1313 if (bp->req_duplex == DUPLEX_FULL)
1314 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1315 else
1316 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1317 }
1318 }
1319
1320 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1321 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1322 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1323 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1324
1325 if (port == PORT_TP)
1326 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1327 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1328
1329 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1330
1331 spin_unlock_bh(&bp->phy_lock);
1332 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1333 spin_lock_bh(&bp->phy_lock);
1334
1335 return 0;
1336}
1337
1338static int
1339bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
b6016b76 1340{
605a9e20 1341 u32 adv, bmcr;
b6016b76
MC
1342 u32 new_adv = 0;
1343
0d8a6571
MC
1344 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1345 return (bnx2_setup_remote_phy(bp, port));
1346
b6016b76
MC
1347 if (!(bp->autoneg & AUTONEG_SPEED)) {
1348 u32 new_bmcr;
5b0c76ad
MC
1349 int force_link_down = 0;
1350
605a9e20
MC
1351 if (bp->req_line_speed == SPEED_2500) {
1352 if (!bnx2_test_and_enable_2g5(bp))
1353 force_link_down = 1;
1354 } else if (bp->req_line_speed == SPEED_1000) {
1355 if (bnx2_test_and_disable_2g5(bp))
1356 force_link_down = 1;
1357 }
ca58c3af 1358 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1359 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1360
ca58c3af 1361 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1362 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1363 new_bmcr |= BMCR_SPEED1000;
605a9e20 1364
27a005b8
MC
1365 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1366 if (bp->req_line_speed == SPEED_2500)
1367 bnx2_enable_forced_2g5(bp);
1368 else if (bp->req_line_speed == SPEED_1000) {
1369 bnx2_disable_forced_2g5(bp);
1370 new_bmcr &= ~0x2000;
1371 }
1372
1373 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1374 if (bp->req_line_speed == SPEED_2500)
1375 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1376 else
1377 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1378 }
1379
b6016b76 1380 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1381 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1382 new_bmcr |= BMCR_FULLDPLX;
1383 }
1384 else {
5b0c76ad 1385 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1386 new_bmcr &= ~BMCR_FULLDPLX;
1387 }
5b0c76ad 1388 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1389 /* Force a link down visible on the other side */
1390 if (bp->link_up) {
ca58c3af 1391 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1392 ~(ADVERTISE_1000XFULL |
1393 ADVERTISE_1000XHALF));
ca58c3af 1394 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1395 BMCR_ANRESTART | BMCR_ANENABLE);
1396
1397 bp->link_up = 0;
1398 netif_carrier_off(bp->dev);
ca58c3af 1399 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1400 bnx2_report_link(bp);
b6016b76 1401 }
ca58c3af
MC
1402 bnx2_write_phy(bp, bp->mii_adv, adv);
1403 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1404 } else {
1405 bnx2_resolve_flow_ctrl(bp);
1406 bnx2_set_mac_link(bp);
b6016b76
MC
1407 }
1408 return 0;
1409 }
1410
605a9e20 1411 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1412
b6016b76
MC
1413 if (bp->advertising & ADVERTISED_1000baseT_Full)
1414 new_adv |= ADVERTISE_1000XFULL;
1415
1416 new_adv |= bnx2_phy_get_pause_adv(bp);
1417
ca58c3af
MC
1418 bnx2_read_phy(bp, bp->mii_adv, &adv);
1419 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1420
1421 bp->serdes_an_pending = 0;
1422 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1423 /* Force a link down visible on the other side */
1424 if (bp->link_up) {
ca58c3af 1425 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1426 spin_unlock_bh(&bp->phy_lock);
1427 msleep(20);
1428 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1429 }
1430
ca58c3af
MC
1431 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1432 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1433 BMCR_ANENABLE);
f8dd064e
MC
1434 /* Speed up link-up time when the link partner
1435 * does not autonegotiate which is very common
1436 * in blade servers. Some blade servers use
1437 * IPMI for kerboard input and it's important
1438 * to minimize link disruptions. Autoneg. involves
1439 * exchanging base pages plus 3 next pages and
1440 * normally completes in about 120 msec.
1441 */
1442 bp->current_interval = SERDES_AN_TIMEOUT;
1443 bp->serdes_an_pending = 1;
1444 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1445 } else {
1446 bnx2_resolve_flow_ctrl(bp);
1447 bnx2_set_mac_link(bp);
b6016b76
MC
1448 }
1449
1450 return 0;
1451}
1452
/* Fibre speeds advertisable by this device; includes 2.5G only when the
 * PHY reports 2.5G capability.  NOTE: expands to an expression that
 * reads a local `bp` - only usable where `bp` is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds advertisable by this device. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement bits for all 10/100 modes (plus CSMA, which the
 * standard requires to be set).
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control advertisement bits for both duplex modes. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1467
0d8a6571
MC
1468static void
1469bnx2_set_default_remote_link(struct bnx2 *bp)
1470{
1471 u32 link;
1472
1473 if (bp->phy_port == PORT_TP)
1474 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1475 else
1476 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1477
1478 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1479 bp->req_line_speed = 0;
1480 bp->autoneg |= AUTONEG_SPEED;
1481 bp->advertising = ADVERTISED_Autoneg;
1482 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1483 bp->advertising |= ADVERTISED_10baseT_Half;
1484 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1485 bp->advertising |= ADVERTISED_10baseT_Full;
1486 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1487 bp->advertising |= ADVERTISED_100baseT_Half;
1488 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1489 bp->advertising |= ADVERTISED_100baseT_Full;
1490 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1491 bp->advertising |= ADVERTISED_1000baseT_Full;
1492 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1493 bp->advertising |= ADVERTISED_2500baseX_Full;
1494 } else {
1495 bp->autoneg = 0;
1496 bp->advertising = 0;
1497 bp->req_duplex = DUPLEX_FULL;
1498 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1499 bp->req_line_speed = SPEED_10;
1500 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1501 bp->req_duplex = DUPLEX_HALF;
1502 }
1503 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1504 bp->req_line_speed = SPEED_100;
1505 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1506 bp->req_duplex = DUPLEX_HALF;
1507 }
1508 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1509 bp->req_line_speed = SPEED_1000;
1510 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1511 bp->req_line_speed = SPEED_2500;
1512 }
1513}
1514
deaf391b
MC
1515static void
1516bnx2_set_default_link(struct bnx2 *bp)
1517{
0d8a6571
MC
1518 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1519 return bnx2_set_default_remote_link(bp);
1520
deaf391b
MC
1521 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1522 bp->req_line_speed = 0;
1523 if (bp->phy_flags & PHY_SERDES_FLAG) {
1524 u32 reg;
1525
1526 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1527
1528 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1529 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1530 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1531 bp->autoneg = 0;
1532 bp->req_line_speed = bp->line_speed = SPEED_1000;
1533 bp->req_duplex = DUPLEX_FULL;
1534 }
1535 } else
1536 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1537}
1538
df149d70
MC
1539static void
1540bnx2_send_heart_beat(struct bnx2 *bp)
1541{
1542 u32 msg;
1543 u32 addr;
1544
1545 spin_lock(&bp->indirect_lock);
1546 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1547 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1548 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1549 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1550 spin_unlock(&bp->indirect_lock);
1551}
1552
0d8a6571
MC
1553static void
1554bnx2_remote_phy_event(struct bnx2 *bp)
1555{
1556 u32 msg;
1557 u8 link_up = bp->link_up;
1558 u8 old_port;
1559
1560 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1561
df149d70
MC
1562 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1563 bnx2_send_heart_beat(bp);
1564
1565 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1566
0d8a6571
MC
1567 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1568 bp->link_up = 0;
1569 else {
1570 u32 speed;
1571
1572 bp->link_up = 1;
1573 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1574 bp->duplex = DUPLEX_FULL;
1575 switch (speed) {
1576 case BNX2_LINK_STATUS_10HALF:
1577 bp->duplex = DUPLEX_HALF;
1578 case BNX2_LINK_STATUS_10FULL:
1579 bp->line_speed = SPEED_10;
1580 break;
1581 case BNX2_LINK_STATUS_100HALF:
1582 bp->duplex = DUPLEX_HALF;
1583 case BNX2_LINK_STATUS_100BASE_T4:
1584 case BNX2_LINK_STATUS_100FULL:
1585 bp->line_speed = SPEED_100;
1586 break;
1587 case BNX2_LINK_STATUS_1000HALF:
1588 bp->duplex = DUPLEX_HALF;
1589 case BNX2_LINK_STATUS_1000FULL:
1590 bp->line_speed = SPEED_1000;
1591 break;
1592 case BNX2_LINK_STATUS_2500HALF:
1593 bp->duplex = DUPLEX_HALF;
1594 case BNX2_LINK_STATUS_2500FULL:
1595 bp->line_speed = SPEED_2500;
1596 break;
1597 default:
1598 bp->line_speed = 0;
1599 break;
1600 }
1601
1602 spin_lock(&bp->phy_lock);
1603 bp->flow_ctrl = 0;
1604 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1605 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1606 if (bp->duplex == DUPLEX_FULL)
1607 bp->flow_ctrl = bp->req_flow_ctrl;
1608 } else {
1609 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1610 bp->flow_ctrl |= FLOW_CTRL_TX;
1611 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1612 bp->flow_ctrl |= FLOW_CTRL_RX;
1613 }
1614
1615 old_port = bp->phy_port;
1616 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1617 bp->phy_port = PORT_FIBRE;
1618 else
1619 bp->phy_port = PORT_TP;
1620
1621 if (old_port != bp->phy_port)
1622 bnx2_set_default_link(bp);
1623
1624 spin_unlock(&bp->phy_lock);
1625 }
1626 if (bp->link_up != link_up)
1627 bnx2_report_link(bp);
1628
1629 bnx2_set_mac_link(bp);
1630}
1631
1632static int
1633bnx2_set_remote_link(struct bnx2 *bp)
1634{
1635 u32 evt_code;
1636
1637 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1638 switch (evt_code) {
1639 case BNX2_FW_EVT_CODE_LINK_EVENT:
1640 bnx2_remote_phy_event(bp);
1641 break;
1642 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1643 default:
df149d70 1644 bnx2_send_heart_beat(bp);
0d8a6571
MC
1645 break;
1646 }
1647 return 0;
1648}
1649
b6016b76
MC
1650static int
1651bnx2_setup_copper_phy(struct bnx2 *bp)
1652{
1653 u32 bmcr;
1654 u32 new_bmcr;
1655
ca58c3af 1656 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1657
1658 if (bp->autoneg & AUTONEG_SPEED) {
1659 u32 adv_reg, adv1000_reg;
1660 u32 new_adv_reg = 0;
1661 u32 new_adv1000_reg = 0;
1662
ca58c3af 1663 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
b6016b76
MC
1664 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1665 ADVERTISE_PAUSE_ASYM);
1666
1667 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1668 adv1000_reg &= PHY_ALL_1000_SPEED;
1669
1670 if (bp->advertising & ADVERTISED_10baseT_Half)
1671 new_adv_reg |= ADVERTISE_10HALF;
1672 if (bp->advertising & ADVERTISED_10baseT_Full)
1673 new_adv_reg |= ADVERTISE_10FULL;
1674 if (bp->advertising & ADVERTISED_100baseT_Half)
1675 new_adv_reg |= ADVERTISE_100HALF;
1676 if (bp->advertising & ADVERTISED_100baseT_Full)
1677 new_adv_reg |= ADVERTISE_100FULL;
1678 if (bp->advertising & ADVERTISED_1000baseT_Full)
1679 new_adv1000_reg |= ADVERTISE_1000FULL;
6aa20a22 1680
b6016b76
MC
1681 new_adv_reg |= ADVERTISE_CSMA;
1682
1683 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1684
1685 if ((adv1000_reg != new_adv1000_reg) ||
1686 (adv_reg != new_adv_reg) ||
1687 ((bmcr & BMCR_ANENABLE) == 0)) {
1688
ca58c3af 1689 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
b6016b76 1690 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
ca58c3af 1691 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
1692 BMCR_ANENABLE);
1693 }
1694 else if (bp->link_up) {
1695 /* Flow ctrl may have changed from auto to forced */
1696 /* or vice-versa. */
1697
1698 bnx2_resolve_flow_ctrl(bp);
1699 bnx2_set_mac_link(bp);
1700 }
1701 return 0;
1702 }
1703
1704 new_bmcr = 0;
1705 if (bp->req_line_speed == SPEED_100) {
1706 new_bmcr |= BMCR_SPEED100;
1707 }
1708 if (bp->req_duplex == DUPLEX_FULL) {
1709 new_bmcr |= BMCR_FULLDPLX;
1710 }
1711 if (new_bmcr != bmcr) {
1712 u32 bmsr;
b6016b76 1713
ca58c3af
MC
1714 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1715 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 1716
b6016b76
MC
1717 if (bmsr & BMSR_LSTATUS) {
1718 /* Force link down */
ca58c3af 1719 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
1720 spin_unlock_bh(&bp->phy_lock);
1721 msleep(50);
1722 spin_lock_bh(&bp->phy_lock);
1723
ca58c3af
MC
1724 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1725 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
1726 }
1727
ca58c3af 1728 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
1729
1730 /* Normally, the new speed is setup after the link has
1731 * gone down and up again. In some cases, link will not go
1732 * down so we need to set up the new speed here.
1733 */
1734 if (bmsr & BMSR_LSTATUS) {
1735 bp->line_speed = bp->req_line_speed;
1736 bp->duplex = bp->req_duplex;
1737 bnx2_resolve_flow_ctrl(bp);
1738 bnx2_set_mac_link(bp);
1739 }
27a005b8
MC
1740 } else {
1741 bnx2_resolve_flow_ctrl(bp);
1742 bnx2_set_mac_link(bp);
b6016b76
MC
1743 }
1744 return 0;
1745}
1746
1747static int
0d8a6571 1748bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1749{
1750 if (bp->loopback == MAC_LOOPBACK)
1751 return 0;
1752
1753 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1754 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1755 }
1756 else {
1757 return (bnx2_setup_copper_phy(bp));
1758 }
1759}
1760
27a005b8
MC
1761static int
1762bnx2_init_5709s_phy(struct bnx2 *bp)
1763{
1764 u32 val;
1765
1766 bp->mii_bmcr = MII_BMCR + 0x10;
1767 bp->mii_bmsr = MII_BMSR + 0x10;
1768 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1769 bp->mii_adv = MII_ADVERTISE + 0x10;
1770 bp->mii_lpa = MII_LPA + 0x10;
1771 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1772
1773 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1774 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1775
1776 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1777 bnx2_reset_phy(bp);
1778
1779 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1780
1781 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1782 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1783 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1784 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1785
1786 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1787 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1788 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1789 val |= BCM5708S_UP1_2G5;
1790 else
1791 val &= ~BCM5708S_UP1_2G5;
1792 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1793
1794 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1795 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1796 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1797 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1798
1799 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1800
1801 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1802 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1803 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1804
1805 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1806
1807 return 0;
1808}
1809
b6016b76 1810static int
5b0c76ad
MC
1811bnx2_init_5708s_phy(struct bnx2 *bp)
1812{
1813 u32 val;
1814
27a005b8
MC
1815 bnx2_reset_phy(bp);
1816
1817 bp->mii_up1 = BCM5708S_UP1;
1818
5b0c76ad
MC
1819 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1820 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1822
1823 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1824 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1825 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1826
1827 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1828 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1829 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1830
1831 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1832 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1833 val |= BCM5708S_UP1_2G5;
1834 bnx2_write_phy(bp, BCM5708S_UP1, val);
1835 }
1836
1837 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
1838 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1839 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
1840 /* increase tx signal amplitude */
1841 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1842 BCM5708S_BLK_ADDR_TX_MISC);
1843 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1844 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1845 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1846 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1847 }
1848
e3648b3d 1849 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
1850 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1851
1852 if (val) {
1853 u32 is_backplane;
1854
e3648b3d 1855 is_backplane = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
1856 BNX2_SHARED_HW_CFG_CONFIG);
1857 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1858 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1859 BCM5708S_BLK_ADDR_TX_MISC);
1860 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1861 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1862 BCM5708S_BLK_ADDR_DIG);
1863 }
1864 }
1865 return 0;
1866}
1867
1868static int
1869bnx2_init_5706s_phy(struct bnx2 *bp)
b6016b76 1870{
27a005b8
MC
1871 bnx2_reset_phy(bp);
1872
b6016b76
MC
1873 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1874
59b47d8a
MC
1875 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1876 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
1877
1878 if (bp->dev->mtu > 1500) {
1879 u32 val;
1880
1881 /* Set extended packet length bit */
1882 bnx2_write_phy(bp, 0x18, 0x7);
1883 bnx2_read_phy(bp, 0x18, &val);
1884 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1885
1886 bnx2_write_phy(bp, 0x1c, 0x6c00);
1887 bnx2_read_phy(bp, 0x1c, &val);
1888 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1889 }
1890 else {
1891 u32 val;
1892
1893 bnx2_write_phy(bp, 0x18, 0x7);
1894 bnx2_read_phy(bp, 0x18, &val);
1895 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1896
1897 bnx2_write_phy(bp, 0x1c, 0x6c00);
1898 bnx2_read_phy(bp, 0x1c, &val);
1899 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1900 }
1901
1902 return 0;
1903}
1904
1905static int
1906bnx2_init_copper_phy(struct bnx2 *bp)
1907{
5b0c76ad
MC
1908 u32 val;
1909
27a005b8
MC
1910 bnx2_reset_phy(bp);
1911
b6016b76
MC
1912 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1913 bnx2_write_phy(bp, 0x18, 0x0c00);
1914 bnx2_write_phy(bp, 0x17, 0x000a);
1915 bnx2_write_phy(bp, 0x15, 0x310b);
1916 bnx2_write_phy(bp, 0x17, 0x201f);
1917 bnx2_write_phy(bp, 0x15, 0x9506);
1918 bnx2_write_phy(bp, 0x17, 0x401f);
1919 bnx2_write_phy(bp, 0x15, 0x14e2);
1920 bnx2_write_phy(bp, 0x18, 0x0400);
1921 }
1922
b659f44e
MC
1923 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1924 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1925 MII_BNX2_DSP_EXPAND_REG | 0x8);
1926 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1927 val &= ~(1 << 8);
1928 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1929 }
1930
b6016b76 1931 if (bp->dev->mtu > 1500) {
b6016b76
MC
1932 /* Set extended packet length bit */
1933 bnx2_write_phy(bp, 0x18, 0x7);
1934 bnx2_read_phy(bp, 0x18, &val);
1935 bnx2_write_phy(bp, 0x18, val | 0x4000);
1936
1937 bnx2_read_phy(bp, 0x10, &val);
1938 bnx2_write_phy(bp, 0x10, val | 0x1);
1939 }
1940 else {
b6016b76
MC
1941 bnx2_write_phy(bp, 0x18, 0x7);
1942 bnx2_read_phy(bp, 0x18, &val);
1943 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1944
1945 bnx2_read_phy(bp, 0x10, &val);
1946 bnx2_write_phy(bp, 0x10, val & ~0x1);
1947 }
1948
5b0c76ad
MC
1949 /* ethernet@wirespeed */
1950 bnx2_write_phy(bp, 0x18, 0x7007);
1951 bnx2_read_phy(bp, 0x18, &val);
1952 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
1953 return 0;
1954}
1955
1956
1957static int
1958bnx2_init_phy(struct bnx2 *bp)
1959{
1960 u32 val;
1961 int rc = 0;
1962
1963 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1964 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1965
ca58c3af
MC
1966 bp->mii_bmcr = MII_BMCR;
1967 bp->mii_bmsr = MII_BMSR;
27a005b8 1968 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
1969 bp->mii_adv = MII_ADVERTISE;
1970 bp->mii_lpa = MII_LPA;
1971
b6016b76
MC
1972 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1973
0d8a6571
MC
1974 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1975 goto setup_phy;
1976
b6016b76
MC
1977 bnx2_read_phy(bp, MII_PHYSID1, &val);
1978 bp->phy_id = val << 16;
1979 bnx2_read_phy(bp, MII_PHYSID2, &val);
1980 bp->phy_id |= val & 0xffff;
1981
1982 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1983 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1984 rc = bnx2_init_5706s_phy(bp);
1985 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1986 rc = bnx2_init_5708s_phy(bp);
27a005b8
MC
1987 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1988 rc = bnx2_init_5709s_phy(bp);
b6016b76
MC
1989 }
1990 else {
1991 rc = bnx2_init_copper_phy(bp);
1992 }
1993
0d8a6571
MC
1994setup_phy:
1995 if (!rc)
1996 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
1997
1998 return rc;
1999}
2000
2001static int
2002bnx2_set_mac_loopback(struct bnx2 *bp)
2003{
2004 u32 mac_mode;
2005
2006 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2007 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2008 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2009 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010 bp->link_up = 1;
2011 return 0;
2012}
2013
bc5a0690
MC
2014static int bnx2_test_link(struct bnx2 *);
2015
2016static int
2017bnx2_set_phy_loopback(struct bnx2 *bp)
2018{
2019 u32 mac_mode;
2020 int rc, i;
2021
2022 spin_lock_bh(&bp->phy_lock);
ca58c3af 2023 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
2024 BMCR_SPEED1000);
2025 spin_unlock_bh(&bp->phy_lock);
2026 if (rc)
2027 return rc;
2028
2029 for (i = 0; i < 10; i++) {
2030 if (bnx2_test_link(bp) == 0)
2031 break;
80be4434 2032 msleep(100);
bc5a0690
MC
2033 }
2034
2035 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2036 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2037 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 2038 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
2039
2040 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2041 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2042 bp->link_up = 1;
2043 return 0;
2044}
2045
b6016b76 2046static int
b090ae2b 2047bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
b6016b76
MC
2048{
2049 int i;
2050 u32 val;
2051
b6016b76
MC
2052 bp->fw_wr_seq++;
2053 msg_data |= bp->fw_wr_seq;
2054
e3648b3d 2055 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76
MC
2056
2057 /* wait for an acknowledgement. */
b090ae2b
MC
2058 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2059 msleep(10);
b6016b76 2060
e3648b3d 2061 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
b6016b76
MC
2062
2063 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2064 break;
2065 }
b090ae2b
MC
2066 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2067 return 0;
b6016b76
MC
2068
2069 /* If we timed out, inform the firmware that this is the case. */
b090ae2b
MC
2070 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2071 if (!silent)
2072 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2073 "%x\n", msg_data);
b6016b76
MC
2074
2075 msg_data &= ~BNX2_DRV_MSG_CODE;
2076 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2077
e3648b3d 2078 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76 2079
b6016b76
MC
2080 return -EBUSY;
2081 }
2082
b090ae2b
MC
2083 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2084 return -EIO;
2085
b6016b76
MC
2086 return 0;
2087}
2088
59b47d8a
MC
/* Initialize the 5709's on-chip context memory and program the host
 * page table with the DMA address of each host-resident context block.
 * Returns 0 on success, -EBUSY if the chip does not acknowledge in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and start its memory self-init.
	 * (1 << 12) is a chip-specific control bit; its meaning is not
	 * visible here — TODO confirm against the 5709 programming guide.
	 * Bits 16+ presumably encode the host page size relative to 256
	 * bytes — confirm likewise.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Poll until the hardware clears MEM_INIT (at most ~20us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Write each context page's DMA address into the page table and
	 * wait for the chip to consume each WRITE_REQ before moving on.
	 */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2131
b6016b76
MC
/* Zero out all 96 on-chip connection contexts (non-5709 chips).
 * Each context is written through the CTX_VIRT_ADDR/CTX_PAGE_TBL
 * window one physical-context-sized chunk at a time.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 quirk: remap part of the CID space so
			 * the virtual and physical context IDs differ.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans CTX_SIZE / PHY_CTX_SIZE physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2174
/* Hardware workaround: drain the chip's internal RX buffer pool,
 * recording the good buffers, then free only the good ones back.
 * Buffers whose address has bit 9 set are bad memory blocks and are
 * deliberately left allocated so the chip never hands them out again.
 * Returns 0 on success or -ENOMEM if the scratch array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is the pool-size bound assumed here — the loop
	 * below never checks good_mbuf_cnt against it; presumably the
	 * hardware pool can't exceed 512 buffers — TODO confirm.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encoding expected by BNX2_RBUF_FW_BUF_FREE: the buffer
		 * value in both halves plus a valid bit — format taken
		 * from this write sequence; confirm against chip docs.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2225
2226static void
6aa20a22 2227bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2228{
2229 u32 val;
2230 u8 *mac_addr = bp->dev->dev_addr;
2231
2232 val = (mac_addr[0] << 8) | mac_addr[1];
2233
2234 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2235
6aa20a22 2236 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2237 (mac_addr[4] << 8) | mac_addr[5];
2238
2239 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2240}
2241
47bf4246
MC
/* Allocate one page for the RX page ring slot @index, DMA-map it, and
 * publish its address into the corresponding rx_bd descriptor.
 * Returns 0 on success or -ENOMEM if the page allocation fails.
 * NOTE(review): the pci_map_page() result is not checked for a mapping
 * error — matches the rest of this driver's era, but worth confirming.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	/* GFP_ATOMIC: this can run from the NAPI softirq path. */
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Descriptor carries the 64-bit DMA address split in two halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2261
2262static void
2263bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2264{
2265 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2266 struct page *page = rx_pg->page;
2267
2268 if (!page)
2269 return;
2270
2271 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2272 PCI_DMA_FROMDEVICE);
2273
2274 __free_page(page);
2275 rx_pg->page = NULL;
2276}
2277
/* Allocate and DMA-map a new receive skb for RX ring slot @index,
 * publish its address into the rx_bd, and advance the per-NAPI
 * producer byte-sequence counter.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* rx_prod_bseq tracks total bytes posted; written to the chip
	 * by bnx2_rx_int().
	 */
	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2308
da3e4fbe 2309static int
35efa7c1 2310bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2311{
35efa7c1 2312 struct status_block *sblk = bnapi->status_blk;
b6016b76 2313 u32 new_link_state, old_link_state;
da3e4fbe 2314 int is_set = 1;
b6016b76 2315
da3e4fbe
MC
2316 new_link_state = sblk->status_attn_bits & event;
2317 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2318 if (new_link_state != old_link_state) {
da3e4fbe
MC
2319 if (new_link_state)
2320 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2321 else
2322 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2323 } else
2324 is_set = 0;
2325
2326 return is_set;
2327}
2328
/* Service PHY-related attention events from the status block:
 * re-evaluate the link under phy_lock on a link-state change, and
 * refresh the remote-PHY link state on a timer-abort event.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
		/* spin_lock (not _bh): callers run in softirq context. */
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2341
ead7270b 2342static inline u16
35efa7c1 2343bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2344{
2345 u16 cons;
2346
35efa7c1 2347 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
ead7270b
MC
2348
2349 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2350 cons++;
2351 return cons;
2352}
2353
/* Reclaim completed TX descriptors: unmap and free each transmitted
 * skb between the software and hardware consumer indexes, then wake
 * the netdev queue if it was stopped and enough descriptors are free.
 */
static void
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bnapi->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Don't free the skb until ALL of its BDs
			 * (frags + 1 header BD, plus one more if the
			 * range wraps over a ring page's next-pointer
			 * BD) have been completed by the hardware.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wraparound. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index so newly completed packets
		 * in this pass are reclaimed without another interrupt.
		 */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bnapi->hw_tx_cons = hw_cons;
	bnapi->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)) {
		/* Re-check under netif_tx_lock to close the race with
		 * bnx2_start_xmit() stopping the queue concurrently.
		 */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2433
/* Recycle @count page-ring entries from the consumer back to the
 * producer side after an error or partial receive.  If @skb is given,
 * its last attached page fragment is detached, re-mapped, and returned
 * to the ring in the first iteration, and the skb itself is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_napi *bnapi,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bnapi->rx_pg_prod, prod;
	u16 cons = bnapi->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Take the last page frag back from the skb and
			 * put it into the consumer slot; its old mapping
			 * was already torn down, so map it again.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move the page and its mapping from the consumer
			 * slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bnapi->rx_pg_prod = hw_prod;
	bnapi->rx_pg_cons = cons;
}
2483
/* Return an unconsumed skb to the RX ring: hand the buffer from the
 * consumer slot @cons back to the producer slot @prod, transferring
 * the DMA mapping and descriptor address, and account the re-posted
 * bytes in rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header region (synced for the CPU during receive)
	 * back to the device.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bnapi->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the skb assignment above is all that's needed. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2513
/* Finish building a received skb.  Replenish the consumed ring slot
 * first; on failure recycle everything and return the error.  For
 * non-split packets just set the length; for split/jumbo packets
 * attach the payload pages from the page ring as skb fragments,
 * replenishing each page slot as it is consumed.
 * @len is the packet length excluding the 4-byte trailing CRC that
 * the lengths below repeatedly adjust for (the "+ 4" / "- 4" terms).
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_napi *bnapi, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, bnapi, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, bnapi, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			/* Also recycle the page-ring entries this packet
			 * would have consumed.
			 */
			bnx2_reuse_rx_skb_pages(bp, bnapi, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bnapi->rx_pg_cons;
		u16 pg_prod = bnapi->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The remaining bytes are all (or part of)
				 * the CRC: drop this page, recycle the
				 * rest, and trim the CRC bytes already
				 * counted into the skb.
				 */
				unsigned int tail = 4 - frag_len;

				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, bnapi, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Last fragment: drop the trailing 4-byte CRC. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bnapi->rx_pg_cons = pg_cons;
				bnapi->rx_pg_prod = pg_prod;
				/* skb is freed inside; its last page frag
				 * goes back to the ring.
				 */
				bnx2_reuse_rx_skb_pages(bp, bnapi, skb,
							pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bnapi->rx_pg_prod = pg_prod;
		bnapi->rx_pg_cons = pg_cons;
	}
	return 0;
}
2605
c09c2627 2606static inline u16
35efa7c1 2607bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 2608{
35efa7c1 2609 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
c09c2627
MC
2610
2611 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2612 cons++;
2613 return cons;
2614}
2615
/* Main NAPI receive loop.  Processes up to @budget packets between the
 * software and hardware consumer indexes: validates the frame header,
 * copies small packets into a fresh skb (recycling the ring buffer) or
 * rebuilds large/split packets via bnx2_rx_skb(), applies checksum and
 * VLAN offload results, and hands the skb to the stack.  Finally posts
 * the new producer indexes and byte sequence to the chip.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bnapi->rx_cons;
	sw_prod = bnapi->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the frame header region is synced here; the rest
		 * of the buffer is unmapped later if the skb is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Errored frame: recycle the buffer, count nothing. */
			bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
					  sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/data split: firmware reports the header
			 * length in the l2_fhdr_ip_xsum field.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the trailing 4-byte frame CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, bnapi, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* The original buffer goes straight back to the
			 * ring; only the copy is passed up.
			 */
			bnx2_reuse_rx_skb(bp, bnapi, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, bnapi, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-MTU frames unless they carry a VLAN tag
		 * (0x8100 = ETH_P_8021Q).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bnapi->rx_cons = sw_cons;
	bnapi->rx_prod = sw_prod;

	/* Post new producer indexes / byte count to the chip mailboxes. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bnapi->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);

	/* Order the mailbox writes before any following MMIO. */
	mmiowb();

	return rx_pkt;

}
2761
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2785
8e6a72c4
MC
/* One-shot MSI ISR: the hardware auto-masks the interrupt, so unlike
 * bnx2_msi() no explicit mask write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2803
/* Shared/INTx ISR: detect whether this device actually raised the
 * interrupt, mask and deassert it, and schedule NAPI polling.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt.  Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		/* Record the index we are acking so bnx2_poll() can tell
		 * whether new status arrived while polling.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2843
0d8a6571
MC
2844#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2845 STATUS_ATTN_BITS_TIMER_ABORT)
da3e4fbe 2846
f4e418f7 2847static inline int
35efa7c1 2848bnx2_has_work(struct bnx2_napi *bnapi)
f4e418f7 2849{
35efa7c1 2850 struct bnx2 *bp = bnapi->bp;
f4e418f7
MC
2851 struct status_block *sblk = bp->status_blk;
2852
a1f60190 2853 if ((bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons) ||
a550c99b 2854 (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons))
f4e418f7
MC
2855 return 1;
2856
da3e4fbe
MC
2857 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2858 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
2859 return 1;
2860
2861 return 0;
2862}
2863
35efa7c1
MC
/* One pass of NAPI work: service attention (PHY/link) events, reclaim
 * TX completions, and receive up to the remaining budget of packets.
 * Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back to flush the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bnx2_get_hw_tx_cons(bnapi) != bnapi->hw_tx_cons)
		bnx2_tx_int(bp, bnapi);

	if (bnx2_get_hw_rx_cons(bnapi) != bnapi->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
2892
/* NAPI poll callback: loop over bnx2_poll_work() until either the
 * budget is exhausted (stay scheduled) or no work remains, in which
 * case complete NAPI and re-enable interrupts via the ACK command
 * register.  Returns the number of RX packets processed.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & USING_MSI_FLAG)) {
				/* MSI: a single ack/unmask write suffices. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the line still masked first,
			 * then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
2934
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the receive filtering mode (promiscuous / all-multi /
 * multicast hash) and VLAN tag stripping into the EMAC and RPM sort
 * registers based on dev->flags and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when a vlan group is
	 * registered; never strip when ASF management is enabled.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill every hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address by the low byte of its CRC: the top
		 * 3 bits select the register, the low 5 bits the bit.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Apply the sort mode: clear, write, then write with enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3009
/* Load RV2P processor firmware: write the instruction stream (pairs of
 * 32-bit words per 64-bit instruction) through the INSTR_HIGH/LOW
 * window into the selected processor, then hold that processor in
 * reset; it is un-stalled later during chip init.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction pair at word address i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3042
/* Load firmware into one of the chip's internal CPUs: halt the CPU,
 * copy each firmware section (text is zlib-compressed and inflated
 * into fw->text first; sbss/bss are zero-filled) into its scratchpad
 * via indirect register writes, set the start PC, and restart the CPU.
 * Returns 0 on success or a negative zlib error from inflating the
 * text section.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
3124
fba9fe91 3125static int
b6016b76
MC
3126bnx2_init_cpus(struct bnx2 *bp)
3127{
3128 struct cpu_reg cpu_reg;
af3ee519 3129 struct fw_info *fw;
110d0ef9
MC
3130 int rc, rv2p_len;
3131 void *text, *rv2p;
b6016b76
MC
3132
3133 /* Initialize the RV2P processor. */
b3448b0b
DV
3134 text = vmalloc(FW_BUF_SIZE);
3135 if (!text)
3136 return -ENOMEM;
110d0ef9
MC
3137 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3138 rv2p = bnx2_xi_rv2p_proc1;
3139 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3140 } else {
3141 rv2p = bnx2_rv2p_proc1;
3142 rv2p_len = sizeof(bnx2_rv2p_proc1);
3143 }
3144 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3145 if (rc < 0)
fba9fe91 3146 goto init_cpu_err;
ea1f8d5c 3147
b3448b0b 3148 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
fba9fe91 3149
110d0ef9
MC
3150 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3151 rv2p = bnx2_xi_rv2p_proc2;
3152 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3153 } else {
3154 rv2p = bnx2_rv2p_proc2;
3155 rv2p_len = sizeof(bnx2_rv2p_proc2);
3156 }
3157 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3158 if (rc < 0)
fba9fe91 3159 goto init_cpu_err;
ea1f8d5c 3160
b3448b0b 3161 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
b6016b76
MC
3162
3163 /* Initialize the RX Processor. */
3164 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3165 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3166 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3167 cpu_reg.state = BNX2_RXP_CPU_STATE;
3168 cpu_reg.state_value_clear = 0xffffff;
3169 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3170 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3171 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3172 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3173 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3174 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3175 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3176
d43584c8
MC
3177 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3178 fw = &bnx2_rxp_fw_09;
3179 else
3180 fw = &bnx2_rxp_fw_06;
fba9fe91 3181
ea1f8d5c 3182 fw->text = text;
af3ee519 3183 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3184 if (rc)
3185 goto init_cpu_err;
3186
b6016b76
MC
3187 /* Initialize the TX Processor. */
3188 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3189 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3190 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3191 cpu_reg.state = BNX2_TXP_CPU_STATE;
3192 cpu_reg.state_value_clear = 0xffffff;
3193 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3194 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3195 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3196 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3197 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3198 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3199 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3200
d43584c8
MC
3201 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3202 fw = &bnx2_txp_fw_09;
3203 else
3204 fw = &bnx2_txp_fw_06;
fba9fe91 3205
ea1f8d5c 3206 fw->text = text;
af3ee519 3207 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3208 if (rc)
3209 goto init_cpu_err;
3210
b6016b76
MC
3211 /* Initialize the TX Patch-up Processor. */
3212 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3213 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3214 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3215 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3216 cpu_reg.state_value_clear = 0xffffff;
3217 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3218 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3219 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3220 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3221 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3222 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3223 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3224
d43584c8
MC
3225 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3226 fw = &bnx2_tpat_fw_09;
3227 else
3228 fw = &bnx2_tpat_fw_06;
fba9fe91 3229
ea1f8d5c 3230 fw->text = text;
af3ee519 3231 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3232 if (rc)
3233 goto init_cpu_err;
3234
b6016b76
MC
3235 /* Initialize the Completion Processor. */
3236 cpu_reg.mode = BNX2_COM_CPU_MODE;
3237 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3238 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3239 cpu_reg.state = BNX2_COM_CPU_STATE;
3240 cpu_reg.state_value_clear = 0xffffff;
3241 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3242 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3243 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3244 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3245 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3246 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3247 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3248
d43584c8
MC
3249 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3250 fw = &bnx2_com_fw_09;
3251 else
3252 fw = &bnx2_com_fw_06;
fba9fe91 3253
ea1f8d5c 3254 fw->text = text;
af3ee519 3255 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3256 if (rc)
3257 goto init_cpu_err;
3258
d43584c8
MC
3259 /* Initialize the Command Processor. */
3260 cpu_reg.mode = BNX2_CP_CPU_MODE;
3261 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3262 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3263 cpu_reg.state = BNX2_CP_CPU_STATE;
3264 cpu_reg.state_value_clear = 0xffffff;
3265 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3266 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3267 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3268 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3269 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3270 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3271 cpu_reg.mips_view_base = 0x8000000;
b6016b76 3272
110d0ef9 3273 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d43584c8 3274 fw = &bnx2_cp_fw_09;
110d0ef9
MC
3275 else
3276 fw = &bnx2_cp_fw_06;
3277
3278 fw->text = text;
3279 rc = load_cpu_fw(bp, &cpu_reg, fw);
b6016b76 3280
fba9fe91 3281init_cpu_err:
ea1f8d5c 3282 vfree(text);
fba9fe91 3283 return rc;
b6016b76
MC
3284}
3285
3286static int
829ca9a3 3287bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
b6016b76
MC
3288{
3289 u16 pmcsr;
3290
3291 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3292
3293 switch (state) {
829ca9a3 3294 case PCI_D0: {
b6016b76
MC
3295 u32 val;
3296
3297 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3298 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3299 PCI_PM_CTRL_PME_STATUS);
3300
3301 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3302 /* delay required during transition out of D3hot */
3303 msleep(20);
3304
3305 val = REG_RD(bp, BNX2_EMAC_MODE);
3306 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3307 val &= ~BNX2_EMAC_MODE_MPKT;
3308 REG_WR(bp, BNX2_EMAC_MODE, val);
3309
3310 val = REG_RD(bp, BNX2_RPM_CONFIG);
3311 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3312 REG_WR(bp, BNX2_RPM_CONFIG, val);
3313 break;
3314 }
829ca9a3 3315 case PCI_D3hot: {
b6016b76
MC
3316 int i;
3317 u32 val, wol_msg;
3318
3319 if (bp->wol) {
3320 u32 advertising;
3321 u8 autoneg;
3322
3323 autoneg = bp->autoneg;
3324 advertising = bp->advertising;
3325
239cd343
MC
3326 if (bp->phy_port == PORT_TP) {
3327 bp->autoneg = AUTONEG_SPEED;
3328 bp->advertising = ADVERTISED_10baseT_Half |
3329 ADVERTISED_10baseT_Full |
3330 ADVERTISED_100baseT_Half |
3331 ADVERTISED_100baseT_Full |
3332 ADVERTISED_Autoneg;
3333 }
b6016b76 3334
239cd343
MC
3335 spin_lock_bh(&bp->phy_lock);
3336 bnx2_setup_phy(bp, bp->phy_port);
3337 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3338
3339 bp->autoneg = autoneg;
3340 bp->advertising = advertising;
3341
3342 bnx2_set_mac_addr(bp);
3343
3344 val = REG_RD(bp, BNX2_EMAC_MODE);
3345
3346 /* Enable port mode. */
3347 val &= ~BNX2_EMAC_MODE_PORT;
239cd343 3348 val |= BNX2_EMAC_MODE_MPKT_RCVD |
b6016b76 3349 BNX2_EMAC_MODE_ACPI_RCVD |
b6016b76 3350 BNX2_EMAC_MODE_MPKT;
239cd343
MC
3351 if (bp->phy_port == PORT_TP)
3352 val |= BNX2_EMAC_MODE_PORT_MII;
3353 else {
3354 val |= BNX2_EMAC_MODE_PORT_GMII;
3355 if (bp->line_speed == SPEED_2500)
3356 val |= BNX2_EMAC_MODE_25G_MODE;
3357 }
b6016b76
MC
3358
3359 REG_WR(bp, BNX2_EMAC_MODE, val);
3360
3361 /* receive all multicast */
3362 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3363 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3364 0xffffffff);
3365 }
3366 REG_WR(bp, BNX2_EMAC_RX_MODE,
3367 BNX2_EMAC_RX_MODE_SORT_MODE);
3368
3369 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3370 BNX2_RPM_SORT_USER0_MC_EN;
3371 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3372 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3373 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3374 BNX2_RPM_SORT_USER0_ENA);
3375
3376 /* Need to enable EMAC and RPM for WOL. */
3377 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3378 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3379 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3380 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3381
3382 val = REG_RD(bp, BNX2_RPM_CONFIG);
3383 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3384 REG_WR(bp, BNX2_RPM_CONFIG, val);
3385
3386 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3387 }
3388 else {
3389 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3390 }
3391
dda1e390
MC
3392 if (!(bp->flags & NO_WOL_FLAG))
3393 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
b6016b76
MC
3394
3395 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3396 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3397 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3398
3399 if (bp->wol)
3400 pmcsr |= 3;
3401 }
3402 else {
3403 pmcsr |= 3;
3404 }
3405 if (bp->wol) {
3406 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3407 }
3408 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3409 pmcsr);
3410
3411 /* No more memory access after this point until
3412 * device is brought back to D0.
3413 */
3414 udelay(50);
3415 break;
3416 }
3417 default:
3418 return -EINVAL;
3419 }
3420 return 0;
3421}
3422
3423static int
3424bnx2_acquire_nvram_lock(struct bnx2 *bp)
3425{
3426 u32 val;
3427 int j;
3428
3429 /* Request access to the flash interface. */
3430 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3431 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3432 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3433 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3434 break;
3435
3436 udelay(5);
3437 }
3438
3439 if (j >= NVRAM_TIMEOUT_COUNT)
3440 return -EBUSY;
3441
3442 return 0;
3443}
3444
3445static int
3446bnx2_release_nvram_lock(struct bnx2 *bp)
3447{
3448 int j;
3449 u32 val;
3450
3451 /* Relinquish nvram interface. */
3452 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3453
3454 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3455 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3456 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3457 break;
3458
3459 udelay(5);
3460 }
3461
3462 if (j >= NVRAM_TIMEOUT_COUNT)
3463 return -EBUSY;
3464
3465 return 0;
3466}
3467
3468
3469static int
3470bnx2_enable_nvram_write(struct bnx2 *bp)
3471{
3472 u32 val;
3473
3474 val = REG_RD(bp, BNX2_MISC_CFG);
3475 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3476
e30372c9 3477 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
3478 int j;
3479
3480 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3481 REG_WR(bp, BNX2_NVM_COMMAND,
3482 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3483
3484 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3485 udelay(5);
3486
3487 val = REG_RD(bp, BNX2_NVM_COMMAND);
3488 if (val & BNX2_NVM_COMMAND_DONE)
3489 break;
3490 }
3491
3492 if (j >= NVRAM_TIMEOUT_COUNT)
3493 return -EBUSY;
3494 }
3495 return 0;
3496}
3497
3498static void
3499bnx2_disable_nvram_write(struct bnx2 *bp)
3500{
3501 u32 val;
3502
3503 val = REG_RD(bp, BNX2_MISC_CFG);
3504 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3505}
3506
3507
3508static void
3509bnx2_enable_nvram_access(struct bnx2 *bp)
3510{
3511 u32 val;
3512
3513 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3514 /* Enable both bits, even on read. */
6aa20a22 3515 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3516 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3517}
3518
3519static void
3520bnx2_disable_nvram_access(struct bnx2 *bp)
3521{
3522 u32 val;
3523
3524 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3525 /* Disable both bits, even after read. */
6aa20a22 3526 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3527 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3528 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3529}
3530
3531static int
3532bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3533{
3534 u32 cmd;
3535 int j;
3536
e30372c9 3537 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
3538 /* Buffered flash, no erase needed */
3539 return 0;
3540
3541 /* Build an erase command */
3542 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3543 BNX2_NVM_COMMAND_DOIT;
3544
3545 /* Need to clear DONE bit separately. */
3546 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3547
3548 /* Address of the NVRAM to read from. */
3549 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3550
3551 /* Issue an erase command. */
3552 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3553
3554 /* Wait for completion. */
3555 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3556 u32 val;
3557
3558 udelay(5);
3559
3560 val = REG_RD(bp, BNX2_NVM_COMMAND);
3561 if (val & BNX2_NVM_COMMAND_DONE)
3562 break;
3563 }
3564
3565 if (j >= NVRAM_TIMEOUT_COUNT)
3566 return -EBUSY;
3567
3568 return 0;
3569}
3570
3571static int
3572bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3573{
3574 u32 cmd;
3575 int j;
3576
3577 /* Build the command word. */
3578 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3579
e30372c9
MC
3580 /* Calculate an offset of a buffered flash, not needed for 5709. */
3581 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
3582 offset = ((offset / bp->flash_info->page_size) <<
3583 bp->flash_info->page_bits) +
3584 (offset % bp->flash_info->page_size);
3585 }
3586
3587 /* Need to clear DONE bit separately. */
3588 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3589
3590 /* Address of the NVRAM to read from. */
3591 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3592
3593 /* Issue a read command. */
3594 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3595
3596 /* Wait for completion. */
3597 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3598 u32 val;
3599
3600 udelay(5);
3601
3602 val = REG_RD(bp, BNX2_NVM_COMMAND);
3603 if (val & BNX2_NVM_COMMAND_DONE) {
3604 val = REG_RD(bp, BNX2_NVM_READ);
3605
3606 val = be32_to_cpu(val);
3607 memcpy(ret_val, &val, 4);
3608 break;
3609 }
3610 }
3611 if (j >= NVRAM_TIMEOUT_COUNT)
3612 return -EBUSY;
3613
3614 return 0;
3615}
3616
3617
3618static int
3619bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3620{
3621 u32 cmd, val32;
3622 int j;
3623
3624 /* Build the command word. */
3625 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3626
e30372c9
MC
3627 /* Calculate an offset of a buffered flash, not needed for 5709. */
3628 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
3629 offset = ((offset / bp->flash_info->page_size) <<
3630 bp->flash_info->page_bits) +
3631 (offset % bp->flash_info->page_size);
3632 }
3633
3634 /* Need to clear DONE bit separately. */
3635 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3636
3637 memcpy(&val32, val, 4);
3638 val32 = cpu_to_be32(val32);
3639
3640 /* Write the data. */
3641 REG_WR(bp, BNX2_NVM_WRITE, val32);
3642
3643 /* Address of the NVRAM to write to. */
3644 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3645
3646 /* Issue the write command. */
3647 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3648
3649 /* Wait for completion. */
3650 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3651 udelay(5);
3652
3653 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3654 break;
3655 }
3656 if (j >= NVRAM_TIMEOUT_COUNT)
3657 return -EBUSY;
3658
3659 return 0;
3660}
3661
3662static int
3663bnx2_init_nvram(struct bnx2 *bp)
3664{
3665 u32 val;
e30372c9 3666 int j, entry_count, rc = 0;
b6016b76
MC
3667 struct flash_spec *flash;
3668
e30372c9
MC
3669 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3670 bp->flash_info = &flash_5709;
3671 goto get_flash_size;
3672 }
3673
b6016b76
MC
3674 /* Determine the selected interface. */
3675 val = REG_RD(bp, BNX2_NVM_CFG1);
3676
ff8ac609 3677 entry_count = ARRAY_SIZE(flash_table);
b6016b76 3678
b6016b76
MC
3679 if (val & 0x40000000) {
3680
3681 /* Flash interface has been reconfigured */
3682 for (j = 0, flash = &flash_table[0]; j < entry_count;
37137709
MC
3683 j++, flash++) {
3684 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3685 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
b6016b76
MC
3686 bp->flash_info = flash;
3687 break;
3688 }
3689 }
3690 }
3691 else {
37137709 3692 u32 mask;
b6016b76
MC
3693 /* Not yet been reconfigured */
3694
37137709
MC
3695 if (val & (1 << 23))
3696 mask = FLASH_BACKUP_STRAP_MASK;
3697 else
3698 mask = FLASH_STRAP_MASK;
3699
b6016b76
MC
3700 for (j = 0, flash = &flash_table[0]; j < entry_count;
3701 j++, flash++) {
3702
37137709 3703 if ((val & mask) == (flash->strapping & mask)) {
b6016b76
MC
3704 bp->flash_info = flash;
3705
3706 /* Request access to the flash interface. */
3707 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3708 return rc;
3709
3710 /* Enable access to flash interface */
3711 bnx2_enable_nvram_access(bp);
3712
3713 /* Reconfigure the flash interface */
3714 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3715 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3716 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3717 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3718
3719 /* Disable access to flash interface */
3720 bnx2_disable_nvram_access(bp);
3721 bnx2_release_nvram_lock(bp);
3722
3723 break;
3724 }
3725 }
3726 } /* if (val & 0x40000000) */
3727
3728 if (j == entry_count) {
3729 bp->flash_info = NULL;
2f23c523 3730 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
1122db71 3731 return -ENODEV;
b6016b76
MC
3732 }
3733
e30372c9 3734get_flash_size:
1122db71
MC
3735 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3736 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3737 if (val)
3738 bp->flash_size = val;
3739 else
3740 bp->flash_size = bp->flash_info->total_size;
3741
b6016b76
MC
3742 return rc;
3743}
3744
3745static int
3746bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3747 int buf_size)
3748{
3749 int rc = 0;
3750 u32 cmd_flags, offset32, len32, extra;
3751
3752 if (buf_size == 0)
3753 return 0;
3754
3755 /* Request access to the flash interface. */
3756 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3757 return rc;
3758
3759 /* Enable access to flash interface */
3760 bnx2_enable_nvram_access(bp);
3761
3762 len32 = buf_size;
3763 offset32 = offset;
3764 extra = 0;
3765
3766 cmd_flags = 0;
3767
3768 if (offset32 & 3) {
3769 u8 buf[4];
3770 u32 pre_len;
3771
3772 offset32 &= ~3;
3773 pre_len = 4 - (offset & 3);
3774
3775 if (pre_len >= len32) {
3776 pre_len = len32;
3777 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3778 BNX2_NVM_COMMAND_LAST;
3779 }
3780 else {
3781 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3782 }
3783
3784 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3785
3786 if (rc)
3787 return rc;
3788
3789 memcpy(ret_buf, buf + (offset & 3), pre_len);
3790
3791 offset32 += 4;
3792 ret_buf += pre_len;
3793 len32 -= pre_len;
3794 }
3795 if (len32 & 3) {
3796 extra = 4 - (len32 & 3);
3797 len32 = (len32 + 4) & ~3;
3798 }
3799
3800 if (len32 == 4) {
3801 u8 buf[4];
3802
3803 if (cmd_flags)
3804 cmd_flags = BNX2_NVM_COMMAND_LAST;
3805 else
3806 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3807 BNX2_NVM_COMMAND_LAST;
3808
3809 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3810
3811 memcpy(ret_buf, buf, 4 - extra);
3812 }
3813 else if (len32 > 0) {
3814 u8 buf[4];
3815
3816 /* Read the first word. */
3817 if (cmd_flags)
3818 cmd_flags = 0;
3819 else
3820 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3821
3822 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3823
3824 /* Advance to the next dword. */
3825 offset32 += 4;
3826 ret_buf += 4;
3827 len32 -= 4;
3828
3829 while (len32 > 4 && rc == 0) {
3830 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3831
3832 /* Advance to the next dword. */
3833 offset32 += 4;
3834 ret_buf += 4;
3835 len32 -= 4;
3836 }
3837
3838 if (rc)
3839 return rc;
3840
3841 cmd_flags = BNX2_NVM_COMMAND_LAST;
3842 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3843
3844 memcpy(ret_buf, buf, 4 - extra);
3845 }
3846
3847 /* Disable access to flash interface */
3848 bnx2_disable_nvram_access(bp);
3849
3850 bnx2_release_nvram_lock(bp);
3851
3852 return rc;
3853}
3854
3855static int
3856bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3857 int buf_size)
3858{
3859 u32 written, offset32, len32;
e6be763f 3860 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
b6016b76
MC
3861 int rc = 0;
3862 int align_start, align_end;
3863
3864 buf = data_buf;
3865 offset32 = offset;
3866 len32 = buf_size;
3867 align_start = align_end = 0;
3868
3869 if ((align_start = (offset32 & 3))) {
3870 offset32 &= ~3;
c873879c
MC
3871 len32 += align_start;
3872 if (len32 < 4)
3873 len32 = 4;
b6016b76
MC
3874 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3875 return rc;
3876 }
3877
3878 if (len32 & 3) {
c873879c
MC
3879 align_end = 4 - (len32 & 3);
3880 len32 += align_end;
3881 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3882 return rc;
b6016b76
MC
3883 }
3884
3885 if (align_start || align_end) {
e6be763f
MC
3886 align_buf = kmalloc(len32, GFP_KERNEL);
3887 if (align_buf == NULL)
b6016b76
MC
3888 return -ENOMEM;
3889 if (align_start) {
e6be763f 3890 memcpy(align_buf, start, 4);
b6016b76
MC
3891 }
3892 if (align_end) {
e6be763f 3893 memcpy(align_buf + len32 - 4, end, 4);
b6016b76 3894 }
e6be763f
MC
3895 memcpy(align_buf + align_start, data_buf, buf_size);
3896 buf = align_buf;
b6016b76
MC
3897 }
3898
e30372c9 3899 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
ae181bc4
MC
3900 flash_buffer = kmalloc(264, GFP_KERNEL);
3901 if (flash_buffer == NULL) {
3902 rc = -ENOMEM;
3903 goto nvram_write_end;
3904 }
3905 }
3906
b6016b76
MC
3907 written = 0;
3908 while ((written < len32) && (rc == 0)) {
3909 u32 page_start, page_end, data_start, data_end;
3910 u32 addr, cmd_flags;
3911 int i;
b6016b76
MC
3912
3913 /* Find the page_start addr */
3914 page_start = offset32 + written;
3915 page_start -= (page_start % bp->flash_info->page_size);
3916 /* Find the page_end addr */
3917 page_end = page_start + bp->flash_info->page_size;
3918 /* Find the data_start addr */
3919 data_start = (written == 0) ? offset32 : page_start;
3920 /* Find the data_end addr */
6aa20a22 3921 data_end = (page_end > offset32 + len32) ?
b6016b76
MC
3922 (offset32 + len32) : page_end;
3923
3924 /* Request access to the flash interface. */
3925 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3926 goto nvram_write_end;
3927
3928 /* Enable access to flash interface */
3929 bnx2_enable_nvram_access(bp);
3930
3931 cmd_flags = BNX2_NVM_COMMAND_FIRST;
e30372c9 3932 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
3933 int j;
3934
3935 /* Read the whole page into the buffer
3936 * (non-buffer flash only) */
3937 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3938 if (j == (bp->flash_info->page_size - 4)) {
3939 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3940 }
3941 rc = bnx2_nvram_read_dword(bp,
6aa20a22
JG
3942 page_start + j,
3943 &flash_buffer[j],
b6016b76
MC
3944 cmd_flags);
3945
3946 if (rc)
3947 goto nvram_write_end;
3948
3949 cmd_flags = 0;
3950 }
3951 }
3952
3953 /* Enable writes to flash interface (unlock write-protect) */
3954 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3955 goto nvram_write_end;
3956
b6016b76
MC
3957 /* Loop to write back the buffer data from page_start to
3958 * data_start */
3959 i = 0;
e30372c9 3960 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
c873879c
MC
3961 /* Erase the page */
3962 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3963 goto nvram_write_end;
3964
3965 /* Re-enable the write again for the actual write */
3966 bnx2_enable_nvram_write(bp);
3967
b6016b76
MC
3968 for (addr = page_start; addr < data_start;
3969 addr += 4, i += 4) {
6aa20a22 3970
b6016b76
MC
3971 rc = bnx2_nvram_write_dword(bp, addr,
3972 &flash_buffer[i], cmd_flags);
3973
3974 if (rc != 0)
3975 goto nvram_write_end;
3976
3977 cmd_flags = 0;
3978 }
3979 }
3980
3981 /* Loop to write the new data from data_start to data_end */
bae25761 3982 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
b6016b76 3983 if ((addr == page_end - 4) ||
e30372c9 3984 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
b6016b76
MC
3985 (addr == data_end - 4))) {
3986
3987 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3988 }
3989 rc = bnx2_nvram_write_dword(bp, addr, buf,
3990 cmd_flags);
3991
3992 if (rc != 0)
3993 goto nvram_write_end;
3994
3995 cmd_flags = 0;
3996 buf += 4;
3997 }
3998
3999 /* Loop to write back the buffer data from data_end
4000 * to page_end */
e30372c9 4001 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4002 for (addr = data_end; addr < page_end;
4003 addr += 4, i += 4) {
6aa20a22 4004
b6016b76
MC
4005 if (addr == page_end-4) {
4006 cmd_flags = BNX2_NVM_COMMAND_LAST;
4007 }
4008 rc = bnx2_nvram_write_dword(bp, addr,
4009 &flash_buffer[i], cmd_flags);
4010
4011 if (rc != 0)
4012 goto nvram_write_end;
4013
4014 cmd_flags = 0;
4015 }
4016 }
4017
4018 /* Disable writes to flash interface (lock write-protect) */
4019 bnx2_disable_nvram_write(bp);
4020
4021 /* Disable access to flash interface */
4022 bnx2_disable_nvram_access(bp);
4023 bnx2_release_nvram_lock(bp);
4024
4025 /* Increment written */
4026 written += data_end - data_start;
4027 }
4028
4029nvram_write_end:
e6be763f
MC
4030 kfree(flash_buffer);
4031 kfree(align_buf);
b6016b76
MC
4032 return rc;
4033}
4034
0d8a6571
MC
4035static void
4036bnx2_init_remote_phy(struct bnx2 *bp)
4037{
4038 u32 val;
4039
4040 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4041 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4042 return;
4043
4044 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4045 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4046 return;
4047
4048 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
0d8a6571
MC
4049 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4050
4051 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4052 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4053 bp->phy_port = PORT_FIBRE;
4054 else
4055 bp->phy_port = PORT_TP;
489310a4
MC
4056
4057 if (netif_running(bp->dev)) {
4058 u32 sig;
4059
4060 if (val & BNX2_LINK_STATUS_LINK_UP) {
4061 bp->link_up = 1;
4062 netif_carrier_on(bp->dev);
4063 } else {
4064 bp->link_up = 0;
4065 netif_carrier_off(bp->dev);
4066 }
4067 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4068 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4069 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4070 sig);
4071 }
0d8a6571
MC
4072 }
4073}
4074
b6016b76
MC
4075static int
4076bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4077{
4078 u32 val;
4079 int i, rc = 0;
489310a4 4080 u8 old_port;
b6016b76
MC
4081
4082 /* Wait for the current PCI transaction to complete before
4083 * issuing a reset. */
4084 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4085 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4086 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4087 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4088 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4089 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4090 udelay(5);
4091
b090ae2b
MC
4092 /* Wait for the firmware to tell us it is ok to issue a reset. */
4093 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4094
b6016b76
MC
4095 /* Deposit a driver reset signature so the firmware knows that
4096 * this is a soft reset. */
e3648b3d 4097 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
b6016b76
MC
4098 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4099
b6016b76
MC
4100 /* Do a dummy read to force the chip to complete all current transaction
4101 * before we issue a reset. */
4102 val = REG_RD(bp, BNX2_MISC_ID);
4103
234754d5
MC
4104 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4105 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4106 REG_RD(bp, BNX2_MISC_COMMAND);
4107 udelay(5);
b6016b76 4108
234754d5
MC
4109 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4110 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
b6016b76 4111
234754d5 4112 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
b6016b76 4113
234754d5
MC
4114 } else {
4115 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4116 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4117 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4118
4119 /* Chip reset. */
4120 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4121
594a9dfa
MC
4122 /* Reading back any register after chip reset will hang the
4123 * bus on 5706 A0 and A1. The msleep below provides plenty
4124 * of margin for write posting.
4125 */
234754d5 4126 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
8e545881
AV
4127 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4128 msleep(20);
b6016b76 4129
234754d5
MC
4130 /* Reset takes approximate 30 usec */
4131 for (i = 0; i < 10; i++) {
4132 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4133 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4134 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4135 break;
4136 udelay(10);
4137 }
4138
4139 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4140 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4141 printk(KERN_ERR PFX "Chip reset did not complete\n");
4142 return -EBUSY;
4143 }
b6016b76
MC
4144 }
4145
4146 /* Make sure byte swapping is properly configured. */
4147 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4148 if (val != 0x01020304) {
4149 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4150 return -ENODEV;
4151 }
4152
b6016b76 4153 /* Wait for the firmware to finish its initialization. */
b090ae2b
MC
4154 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4155 if (rc)
4156 return rc;
b6016b76 4157
0d8a6571 4158 spin_lock_bh(&bp->phy_lock);
489310a4 4159 old_port = bp->phy_port;
0d8a6571 4160 bnx2_init_remote_phy(bp);
489310a4 4161 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
0d8a6571
MC
4162 bnx2_set_default_remote_link(bp);
4163 spin_unlock_bh(&bp->phy_lock);
4164
b6016b76
MC
4165 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4166 /* Adjust the voltage regular to two steps lower. The default
4167 * of this register is 0x0000000e. */
4168 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4169
4170 /* Remove bad rbuf memory from the free pool. */
4171 rc = bnx2_alloc_bad_rbuf(bp);
4172 }
4173
4174 return rc;
4175}
4176
4177static int
4178bnx2_init_chip(struct bnx2 *bp)
4179{
4180 u32 val;
b090ae2b 4181 int rc;
b6016b76
MC
4182
4183 /* Make sure the interrupt is not active. */
4184 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4185
4186 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4187 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4188#ifdef __BIG_ENDIAN
6aa20a22 4189 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
b6016b76 4190#endif
6aa20a22 4191 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
b6016b76
MC
4192 DMA_READ_CHANS << 12 |
4193 DMA_WRITE_CHANS << 16;
4194
4195 val |= (0x2 << 20) | (1 << 11);
4196
dda1e390 4197 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
b6016b76
MC
4198 val |= (1 << 23);
4199
4200 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4201 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4202 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4203
4204 REG_WR(bp, BNX2_DMA_CONFIG, val);
4205
4206 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4207 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4208 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4209 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4210 }
4211
4212 if (bp->flags & PCIX_FLAG) {
4213 u16 val16;
4214
4215 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4216 &val16);
4217 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4218 val16 & ~PCI_X_CMD_ERO);
4219 }
4220
4221 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4222 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4223 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4224 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4225
4226 /* Initialize context mapping and zero out the quick contexts. The
4227 * context block must have already been enabled. */
641bdcd5
MC
4228 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4229 rc = bnx2_init_5709_context(bp);
4230 if (rc)
4231 return rc;
4232 } else
59b47d8a 4233 bnx2_init_context(bp);
b6016b76 4234
fba9fe91
MC
4235 if ((rc = bnx2_init_cpus(bp)) != 0)
4236 return rc;
4237
b6016b76
MC
4238 bnx2_init_nvram(bp);
4239
4240 bnx2_set_mac_addr(bp);
4241
4242 val = REG_RD(bp, BNX2_MQ_CONFIG);
4243 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4244 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
68c9f75a
MC
4245 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4246 val |= BNX2_MQ_CONFIG_HALT_DIS;
4247
b6016b76
MC
4248 REG_WR(bp, BNX2_MQ_CONFIG, val);
4249
4250 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4251 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4252 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4253
4254 val = (BCM_PAGE_BITS - 8) << 24;
4255 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4256
4257 /* Configure page size. */
4258 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4259 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4260 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4261 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4262
4263 val = bp->mac_addr[0] +
4264 (bp->mac_addr[1] << 8) +
4265 (bp->mac_addr[2] << 16) +
4266 bp->mac_addr[3] +
4267 (bp->mac_addr[4] << 8) +
4268 (bp->mac_addr[5] << 16);
4269 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4270
4271 /* Program the MTU. Also include 4 bytes for CRC32. */
4272 val = bp->dev->mtu + ETH_HLEN + 4;
4273 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4274 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4275 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4276
35efa7c1 4277 bp->bnx2_napi.last_status_idx = 0;
b6016b76
MC
4278 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4279
4280 /* Set up how to generate a link change interrupt. */
4281 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4282
4283 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4284 (u64) bp->status_blk_mapping & 0xffffffff);
4285 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4286
4287 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4288 (u64) bp->stats_blk_mapping & 0xffffffff);
4289 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4290 (u64) bp->stats_blk_mapping >> 32);
4291
6aa20a22 4292 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
b6016b76
MC
4293 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4294
4295 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4296 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4297
4298 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4299 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4300
4301 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4302
4303 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4304
4305 REG_WR(bp, BNX2_HC_COM_TICKS,
4306 (bp->com_ticks_int << 16) | bp->com_ticks);
4307
4308 REG_WR(bp, BNX2_HC_CMD_TICKS,
4309 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4310
02537b06
MC
4311 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4312 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4313 else
7ea6920e 4314 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
b6016b76
MC
4315 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4316
4317 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
8e6a72c4 4318 val = BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76 4319 else {
8e6a72c4
MC
4320 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4321 BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76
MC
4322 }
4323
8e6a72c4
MC
4324 if (bp->flags & ONE_SHOT_MSI_FLAG)
4325 val |= BNX2_HC_CONFIG_ONE_SHOT;
4326
4327 REG_WR(bp, BNX2_HC_CONFIG, val);
4328
b6016b76
MC
4329 /* Clear internal stats counters. */
4330 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4331
da3e4fbe 4332 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
b6016b76
MC
4333
4334 /* Initialize the receive filter. */
4335 bnx2_set_rx_mode(bp->dev);
4336
0aa38df7
MC
4337 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4338 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4339 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4340 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4341 }
b090ae2b
MC
4342 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4343 0);
b6016b76 4344
df149d70 4345 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
b6016b76
MC
4346 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4347
4348 udelay(20);
4349
bf5295bb
MC
4350 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4351
b090ae2b 4352 return rc;
b6016b76
MC
4353}
4354
59b47d8a
MC
4355static void
4356bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4357{
4358 u32 val, offset0, offset1, offset2, offset3;
4359
4360 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4361 offset0 = BNX2_L2CTX_TYPE_XI;
4362 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4363 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4364 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4365 } else {
4366 offset0 = BNX2_L2CTX_TYPE;
4367 offset1 = BNX2_L2CTX_CMD_TYPE;
4368 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4369 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4370 }
4371 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4372 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4373
4374 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4375 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4376
4377 val = (u64) bp->tx_desc_mapping >> 32;
4378 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4379
4380 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4381 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4382}
b6016b76
MC
4383
4384static void
4385bnx2_init_tx_ring(struct bnx2 *bp)
4386{
4387 struct tx_bd *txbd;
59b47d8a 4388 u32 cid;
a550c99b 4389 struct bnx2_napi *bnapi = &bp->bnx2_napi;
b6016b76 4390
2f8af120
MC
4391 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4392
b6016b76 4393 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4394
b6016b76
MC
4395 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4396 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4397
4398 bp->tx_prod = 0;
a550c99b
MC
4399 bnapi->tx_cons = 0;
4400 bnapi->hw_tx_cons = 0;
b6016b76 4401 bp->tx_prod_bseq = 0;
6aa20a22 4402
59b47d8a
MC
4403 cid = TX_CID;
4404 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4405 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4406
59b47d8a 4407 bnx2_init_tx_context(bp, cid);
b6016b76
MC
4408}
4409
4410static void
5d5d0015
MC
4411bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4412 int num_rings)
b6016b76 4413{
b6016b76 4414 int i;
5d5d0015 4415 struct rx_bd *rxbd;
6aa20a22 4416
5d5d0015 4417 for (i = 0; i < num_rings; i++) {
13daffa2 4418 int j;
b6016b76 4419
5d5d0015 4420 rxbd = &rx_ring[i][0];
13daffa2 4421 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 4422 rxbd->rx_bd_len = buf_size;
13daffa2
MC
4423 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4424 }
5d5d0015 4425 if (i == (num_rings - 1))
13daffa2
MC
4426 j = 0;
4427 else
4428 j = i + 1;
5d5d0015
MC
4429 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4430 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 4431 }
5d5d0015
MC
4432}
4433
4434static void
4435bnx2_init_rx_ring(struct bnx2 *bp)
4436{
4437 int i;
4438 u16 prod, ring_prod;
4439 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
a1f60190 4440 struct bnx2_napi *bnapi = &bp->bnx2_napi;
5d5d0015 4441
a1f60190
MC
4442 bnapi->rx_prod = 0;
4443 bnapi->rx_cons = 0;
4444 bnapi->rx_prod_bseq = 0;
4445 bnapi->rx_pg_prod = 0;
4446 bnapi->rx_pg_cons = 0;
5d5d0015
MC
4447
4448 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4449 bp->rx_buf_use_size, bp->rx_max_ring);
4450
4451 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246
MC
4452 if (bp->rx_pg_ring_size) {
4453 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4454 bp->rx_pg_desc_mapping,
4455 PAGE_SIZE, bp->rx_max_pg_ring);
4456 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4457 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4458 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4459 BNX2_L2CTX_RBDC_JUMBO_KEY);
4460
4461 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4462 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4463
4464 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4465 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4466
4467 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4468 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4469 }
b6016b76
MC
4470
4471 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4472 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4473 val |= 0x02 << 8;
5d5d0015 4474 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
b6016b76 4475
13daffa2 4476 val = (u64) bp->rx_desc_mapping[0] >> 32;
5d5d0015 4477 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 4478
13daffa2 4479 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
5d5d0015 4480 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 4481
a1f60190 4482 ring_prod = prod = bnapi->rx_pg_prod;
47bf4246
MC
4483 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4484 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4485 break;
4486 prod = NEXT_RX_BD(prod);
4487 ring_prod = RX_PG_RING_IDX(prod);
4488 }
a1f60190 4489 bnapi->rx_pg_prod = prod;
47bf4246 4490
a1f60190 4491 ring_prod = prod = bnapi->rx_prod;
236b6394 4492 for (i = 0; i < bp->rx_ring_size; i++) {
a1f60190 4493 if (bnx2_alloc_rx_skb(bp, bnapi, ring_prod) < 0) {
b6016b76
MC
4494 break;
4495 }
4496 prod = NEXT_RX_BD(prod);
4497 ring_prod = RX_RING_IDX(prod);
4498 }
a1f60190 4499 bnapi->rx_prod = prod;
b6016b76 4500
a1f60190
MC
4501 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
4502 bnapi->rx_pg_prod);
b6016b76
MC
4503 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4504
a1f60190 4505 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bnapi->rx_prod_bseq);
b6016b76
MC
4506}
4507
5d5d0015 4508static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 4509{
5d5d0015 4510 u32 max, num_rings = 1;
13daffa2 4511
5d5d0015
MC
4512 while (ring_size > MAX_RX_DESC_CNT) {
4513 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
4514 num_rings++;
4515 }
4516 /* round to next power of 2 */
5d5d0015 4517 max = max_size;
13daffa2
MC
4518 while ((max & num_rings) == 0)
4519 max >>= 1;
4520
4521 if (num_rings != max)
4522 max <<= 1;
4523
5d5d0015
MC
4524 return max;
4525}
4526
4527static void
4528bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4529{
84eaa187 4530 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
4531
4532 /* 8 for CRC and VLAN */
4533 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4534
84eaa187
MC
4535 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4536 sizeof(struct skb_shared_info);
4537
5d5d0015 4538 bp->rx_copy_thresh = RX_COPY_THRESH;
47bf4246
MC
4539 bp->rx_pg_ring_size = 0;
4540 bp->rx_max_pg_ring = 0;
4541 bp->rx_max_pg_ring_idx = 0;
84eaa187
MC
4542 if (rx_space > PAGE_SIZE) {
4543 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4544
4545 jumbo_size = size * pages;
4546 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4547 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4548
4549 bp->rx_pg_ring_size = jumbo_size;
4550 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4551 MAX_RX_PG_RINGS);
4552 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4553 rx_size = RX_COPY_THRESH + bp->rx_offset;
4554 bp->rx_copy_thresh = 0;
4555 }
5d5d0015
MC
4556
4557 bp->rx_buf_use_size = rx_size;
4558 /* hw alignment */
4559 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
1db82f2a 4560 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
5d5d0015
MC
4561 bp->rx_ring_size = size;
4562 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
13daffa2
MC
4563 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4564}
4565
b6016b76
MC
4566static void
4567bnx2_free_tx_skbs(struct bnx2 *bp)
4568{
4569 int i;
4570
4571 if (bp->tx_buf_ring == NULL)
4572 return;
4573
4574 for (i = 0; i < TX_DESC_CNT; ) {
4575 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4576 struct sk_buff *skb = tx_buf->skb;
4577 int j, last;
4578
4579 if (skb == NULL) {
4580 i++;
4581 continue;
4582 }
4583
4584 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4585 skb_headlen(skb), PCI_DMA_TODEVICE);
4586
4587 tx_buf->skb = NULL;
4588
4589 last = skb_shinfo(skb)->nr_frags;
4590 for (j = 0; j < last; j++) {
4591 tx_buf = &bp->tx_buf_ring[i + j + 1];
4592 pci_unmap_page(bp->pdev,
4593 pci_unmap_addr(tx_buf, mapping),
4594 skb_shinfo(skb)->frags[j].size,
4595 PCI_DMA_TODEVICE);
4596 }
745720e5 4597 dev_kfree_skb(skb);
b6016b76
MC
4598 i += j + 1;
4599 }
4600
4601}
4602
4603static void
4604bnx2_free_rx_skbs(struct bnx2 *bp)
4605{
4606 int i;
4607
4608 if (bp->rx_buf_ring == NULL)
4609 return;
4610
13daffa2 4611 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4612 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4613 struct sk_buff *skb = rx_buf->skb;
4614
05d0f1cf 4615 if (skb == NULL)
b6016b76
MC
4616 continue;
4617
4618 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4619 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4620
4621 rx_buf->skb = NULL;
4622
745720e5 4623 dev_kfree_skb(skb);
b6016b76 4624 }
47bf4246
MC
4625 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4626 bnx2_free_rx_page(bp, i);
b6016b76
MC
4627}
4628
/* Release every TX and RX buffer held by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4635
4636static int
4637bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4638{
4639 int rc;
4640
4641 rc = bnx2_reset_chip(bp, reset_code);
4642 bnx2_free_skbs(bp);
4643 if (rc)
4644 return rc;
4645
fba9fe91
MC
4646 if ((rc = bnx2_init_chip(bp)) != 0)
4647 return rc;
4648
b6016b76
MC
4649 bnx2_init_tx_ring(bp);
4650 bnx2_init_rx_ring(bp);
4651 return 0;
4652}
4653
4654static int
4655bnx2_init_nic(struct bnx2 *bp)
4656{
4657 int rc;
4658
4659 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4660 return rc;
4661
80be4434 4662 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4663 bnx2_init_phy(bp);
4664 bnx2_set_link(bp);
0d8a6571 4665 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4666 return 0;
4667}
4668
4669static int
4670bnx2_test_registers(struct bnx2 *bp)
4671{
4672 int ret;
5bae30c9 4673 int i, is_5709;
f71e1309 4674 static const struct {
b6016b76
MC
4675 u16 offset;
4676 u16 flags;
5bae30c9 4677#define BNX2_FL_NOT_5709 1
b6016b76
MC
4678 u32 rw_mask;
4679 u32 ro_mask;
4680 } reg_tbl[] = {
4681 { 0x006c, 0, 0x00000000, 0x0000003f },
4682 { 0x0090, 0, 0xffffffff, 0x00000000 },
4683 { 0x0094, 0, 0x00000000, 0x00000000 },
4684
5bae30c9
MC
4685 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4686 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4687 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4688 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4689 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4690 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4691 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4692 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4693 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4694
4695 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4696 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4697 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4698 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4699 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4700 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4701
4702 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4703 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4704 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
4705
4706 { 0x1000, 0, 0x00000000, 0x00000001 },
4707 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
4708
4709 { 0x1408, 0, 0x01c00800, 0x00000000 },
4710 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4711 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 4712 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
4713 { 0x14b0, 0, 0x00000002, 0x00000001 },
4714 { 0x14b8, 0, 0x00000000, 0x00000000 },
4715 { 0x14c0, 0, 0x00000000, 0x00000009 },
4716 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4717 { 0x14cc, 0, 0x00000000, 0x00000001 },
4718 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
4719
4720 { 0x1800, 0, 0x00000000, 0x00000001 },
4721 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
4722
4723 { 0x2800, 0, 0x00000000, 0x00000001 },
4724 { 0x2804, 0, 0x00000000, 0x00003f01 },
4725 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4726 { 0x2810, 0, 0xffff0000, 0x00000000 },
4727 { 0x2814, 0, 0xffff0000, 0x00000000 },
4728 { 0x2818, 0, 0xffff0000, 0x00000000 },
4729 { 0x281c, 0, 0xffff0000, 0x00000000 },
4730 { 0x2834, 0, 0xffffffff, 0x00000000 },
4731 { 0x2840, 0, 0x00000000, 0xffffffff },
4732 { 0x2844, 0, 0x00000000, 0xffffffff },
4733 { 0x2848, 0, 0xffffffff, 0x00000000 },
4734 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4735
4736 { 0x2c00, 0, 0x00000000, 0x00000011 },
4737 { 0x2c04, 0, 0x00000000, 0x00030007 },
4738
b6016b76
MC
4739 { 0x3c00, 0, 0x00000000, 0x00000001 },
4740 { 0x3c04, 0, 0x00000000, 0x00070000 },
4741 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4742 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4743 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4744 { 0x3c14, 0, 0x00000000, 0xffffffff },
4745 { 0x3c18, 0, 0x00000000, 0xffffffff },
4746 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4747 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
4748
4749 { 0x5004, 0, 0x00000000, 0x0000007f },
4750 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 4751
b6016b76
MC
4752 { 0x5c00, 0, 0x00000000, 0x00000001 },
4753 { 0x5c04, 0, 0x00000000, 0x0003000f },
4754 { 0x5c08, 0, 0x00000003, 0x00000000 },
4755 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4756 { 0x5c10, 0, 0x00000000, 0xffffffff },
4757 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4758 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4759 { 0x5c88, 0, 0x00000000, 0x00077373 },
4760 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4761
4762 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4763 { 0x680c, 0, 0xffffffff, 0x00000000 },
4764 { 0x6810, 0, 0xffffffff, 0x00000000 },
4765 { 0x6814, 0, 0xffffffff, 0x00000000 },
4766 { 0x6818, 0, 0xffffffff, 0x00000000 },
4767 { 0x681c, 0, 0xffffffff, 0x00000000 },
4768 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4769 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4770 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4771 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4772 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4773 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4774 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4775 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4776 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4777 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4778 { 0x684c, 0, 0xffffffff, 0x00000000 },
4779 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4780 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4781 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4782 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4783 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4784 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4785
4786 { 0xffff, 0, 0x00000000, 0x00000000 },
4787 };
4788
4789 ret = 0;
5bae30c9
MC
4790 is_5709 = 0;
4791 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4792 is_5709 = 1;
4793
b6016b76
MC
4794 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4795 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
4796 u16 flags = reg_tbl[i].flags;
4797
4798 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4799 continue;
b6016b76
MC
4800
4801 offset = (u32) reg_tbl[i].offset;
4802 rw_mask = reg_tbl[i].rw_mask;
4803 ro_mask = reg_tbl[i].ro_mask;
4804
14ab9b86 4805 save_val = readl(bp->regview + offset);
b6016b76 4806
14ab9b86 4807 writel(0, bp->regview + offset);
b6016b76 4808
14ab9b86 4809 val = readl(bp->regview + offset);
b6016b76
MC
4810 if ((val & rw_mask) != 0) {
4811 goto reg_test_err;
4812 }
4813
4814 if ((val & ro_mask) != (save_val & ro_mask)) {
4815 goto reg_test_err;
4816 }
4817
14ab9b86 4818 writel(0xffffffff, bp->regview + offset);
b6016b76 4819
14ab9b86 4820 val = readl(bp->regview + offset);
b6016b76
MC
4821 if ((val & rw_mask) != rw_mask) {
4822 goto reg_test_err;
4823 }
4824
4825 if ((val & ro_mask) != (save_val & ro_mask)) {
4826 goto reg_test_err;
4827 }
4828
14ab9b86 4829 writel(save_val, bp->regview + offset);
b6016b76
MC
4830 continue;
4831
4832reg_test_err:
14ab9b86 4833 writel(save_val, bp->regview + offset);
b6016b76
MC
4834 ret = -ENODEV;
4835 break;
4836 }
4837 return ret;
4838}
4839
4840static int
4841bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4842{
f71e1309 4843 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4844 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4845 int i;
4846
4847 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4848 u32 offset;
4849
4850 for (offset = 0; offset < size; offset += 4) {
4851
4852 REG_WR_IND(bp, start + offset, test_pattern[i]);
4853
4854 if (REG_RD_IND(bp, start + offset) !=
4855 test_pattern[i]) {
4856 return -ENODEV;
4857 }
4858 }
4859 }
4860 return 0;
4861}
4862
4863static int
4864bnx2_test_memory(struct bnx2 *bp)
4865{
4866 int ret = 0;
4867 int i;
5bae30c9 4868 static struct mem_entry {
b6016b76
MC
4869 u32 offset;
4870 u32 len;
5bae30c9 4871 } mem_tbl_5706[] = {
b6016b76 4872 { 0x60000, 0x4000 },
5b0c76ad 4873 { 0xa0000, 0x3000 },
b6016b76
MC
4874 { 0xe0000, 0x4000 },
4875 { 0x120000, 0x4000 },
4876 { 0x1a0000, 0x4000 },
4877 { 0x160000, 0x4000 },
4878 { 0xffffffff, 0 },
5bae30c9
MC
4879 },
4880 mem_tbl_5709[] = {
4881 { 0x60000, 0x4000 },
4882 { 0xa0000, 0x3000 },
4883 { 0xe0000, 0x4000 },
4884 { 0x120000, 0x4000 },
4885 { 0x1a0000, 0x4000 },
4886 { 0xffffffff, 0 },
b6016b76 4887 };
5bae30c9
MC
4888 struct mem_entry *mem_tbl;
4889
4890 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4891 mem_tbl = mem_tbl_5709;
4892 else
4893 mem_tbl = mem_tbl_5706;
b6016b76
MC
4894
4895 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4896 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4897 mem_tbl[i].len)) != 0) {
4898 return ret;
4899 }
4900 }
6aa20a22 4901
b6016b76
MC
4902 return ret;
4903}
4904
bc5a0690
MC
4905#define BNX2_MAC_LOOPBACK 0
4906#define BNX2_PHY_LOOPBACK 1
4907
b6016b76 4908static int
bc5a0690 4909bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
4910{
4911 unsigned int pkt_size, num_pkts, i;
4912 struct sk_buff *skb, *rx_skb;
4913 unsigned char *packet;
bc5a0690 4914 u16 rx_start_idx, rx_idx;
b6016b76
MC
4915 dma_addr_t map;
4916 struct tx_bd *txbd;
4917 struct sw_bd *rx_buf;
4918 struct l2_fhdr *rx_hdr;
4919 int ret = -ENODEV;
35efa7c1 4920 struct bnx2_napi *bnapi = &bp->bnx2_napi;
b6016b76 4921
bc5a0690
MC
4922 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4923 bp->loopback = MAC_LOOPBACK;
4924 bnx2_set_mac_loopback(bp);
4925 }
4926 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
489310a4
MC
4927 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4928 return 0;
4929
80be4434 4930 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
4931 bnx2_set_phy_loopback(bp);
4932 }
4933 else
4934 return -EINVAL;
b6016b76 4935
84eaa187 4936 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 4937 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
4938 if (!skb)
4939 return -ENOMEM;
b6016b76 4940 packet = skb_put(skb, pkt_size);
6634292b 4941 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
4942 memset(packet + 6, 0x0, 8);
4943 for (i = 14; i < pkt_size; i++)
4944 packet[i] = (unsigned char) (i & 0xff);
4945
4946 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4947 PCI_DMA_TODEVICE);
4948
bf5295bb
MC
4949 REG_WR(bp, BNX2_HC_COMMAND,
4950 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4951
b6016b76
MC
4952 REG_RD(bp, BNX2_HC_COMMAND);
4953
4954 udelay(5);
35efa7c1 4955 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 4956
b6016b76
MC
4957 num_pkts = 0;
4958
bc5a0690 4959 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
4960
4961 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4962 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4963 txbd->tx_bd_mss_nbytes = pkt_size;
4964 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4965
4966 num_pkts++;
bc5a0690
MC
4967 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4968 bp->tx_prod_bseq += pkt_size;
b6016b76 4969
234754d5
MC
4970 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4971 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4972
4973 udelay(100);
4974
bf5295bb
MC
4975 REG_WR(bp, BNX2_HC_COMMAND,
4976 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4977
b6016b76
MC
4978 REG_RD(bp, BNX2_HC_COMMAND);
4979
4980 udelay(5);
4981
4982 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 4983 dev_kfree_skb(skb);
b6016b76 4984
35efa7c1 4985 if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
b6016b76 4986 goto loopback_test_done;
b6016b76 4987
35efa7c1 4988 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
4989 if (rx_idx != rx_start_idx + num_pkts) {
4990 goto loopback_test_done;
4991 }
4992
4993 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4994 rx_skb = rx_buf->skb;
4995
4996 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4997 skb_reserve(rx_skb, bp->rx_offset);
4998
4999 pci_dma_sync_single_for_cpu(bp->pdev,
5000 pci_unmap_addr(rx_buf, mapping),
5001 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5002
ade2bfe7 5003 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
5004 (L2_FHDR_ERRORS_BAD_CRC |
5005 L2_FHDR_ERRORS_PHY_DECODE |
5006 L2_FHDR_ERRORS_ALIGNMENT |
5007 L2_FHDR_ERRORS_TOO_SHORT |
5008 L2_FHDR_ERRORS_GIANT_FRAME)) {
5009
5010 goto loopback_test_done;
5011 }
5012
5013 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5014 goto loopback_test_done;
5015 }
5016
5017 for (i = 14; i < pkt_size; i++) {
5018 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5019 goto loopback_test_done;
5020 }
5021 }
5022
5023 ret = 0;
5024
5025loopback_test_done:
5026 bp->loopback = 0;
5027 return ret;
5028}
5029
bc5a0690
MC
5030#define BNX2_MAC_LOOPBACK_FAILED 1
5031#define BNX2_PHY_LOOPBACK_FAILED 2
5032#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5033 BNX2_PHY_LOOPBACK_FAILED)
5034
5035static int
5036bnx2_test_loopback(struct bnx2 *bp)
5037{
5038 int rc = 0;
5039
5040 if (!netif_running(bp->dev))
5041 return BNX2_LOOPBACK_FAILED;
5042
5043 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5044 spin_lock_bh(&bp->phy_lock);
5045 bnx2_init_phy(bp);
5046 spin_unlock_bh(&bp->phy_lock);
5047 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5048 rc |= BNX2_MAC_LOOPBACK_FAILED;
5049 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5050 rc |= BNX2_PHY_LOOPBACK_FAILED;
5051 return rc;
5052}
5053
b6016b76
MC
5054#define NVRAM_SIZE 0x200
5055#define CRC32_RESIDUAL 0xdebb20e3
5056
5057static int
5058bnx2_test_nvram(struct bnx2 *bp)
5059{
5060 u32 buf[NVRAM_SIZE / 4];
5061 u8 *data = (u8 *) buf;
5062 int rc = 0;
5063 u32 magic, csum;
5064
5065 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5066 goto test_nvram_done;
5067
5068 magic = be32_to_cpu(buf[0]);
5069 if (magic != 0x669955aa) {
5070 rc = -ENODEV;
5071 goto test_nvram_done;
5072 }
5073
5074 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5075 goto test_nvram_done;
5076
5077 csum = ether_crc_le(0x100, data);
5078 if (csum != CRC32_RESIDUAL) {
5079 rc = -ENODEV;
5080 goto test_nvram_done;
5081 }
5082
5083 csum = ether_crc_le(0x100, data + 0x100);
5084 if (csum != CRC32_RESIDUAL) {
5085 rc = -ENODEV;
5086 }
5087
5088test_nvram_done:
5089 return rc;
5090}
5091
5092static int
5093bnx2_test_link(struct bnx2 *bp)
5094{
5095 u32 bmsr;
5096
489310a4
MC
5097 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5098 if (bp->link_up)
5099 return 0;
5100 return -ENODEV;
5101 }
c770a65c 5102 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5103 bnx2_enable_bmsr1(bp);
5104 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5105 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5106 bnx2_disable_bmsr1(bp);
c770a65c 5107 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5108
b6016b76
MC
5109 if (bmsr & BMSR_LSTATUS) {
5110 return 0;
5111 }
5112 return -ENODEV;
5113}
5114
5115static int
5116bnx2_test_intr(struct bnx2 *bp)
5117{
5118 int i;
b6016b76
MC
5119 u16 status_idx;
5120
5121 if (!netif_running(bp->dev))
5122 return -ENODEV;
5123
5124 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5125
5126 /* This register is not touched during run-time. */
bf5295bb 5127 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5128 REG_RD(bp, BNX2_HC_COMMAND);
5129
5130 for (i = 0; i < 10; i++) {
5131 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5132 status_idx) {
5133
5134 break;
5135 }
5136
5137 msleep_interruptible(10);
5138 }
5139 if (i < 10)
5140 return 0;
5141
5142 return -ENODEV;
5143}
5144
5145static void
48b01e2d 5146bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5147{
48b01e2d
MC
5148 spin_lock(&bp->phy_lock);
5149 if (bp->serdes_an_pending)
5150 bp->serdes_an_pending--;
5151 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5152 u32 bmcr;
b6016b76 5153
48b01e2d 5154 bp->current_interval = bp->timer_interval;
cd339a0e 5155
ca58c3af 5156 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5157
48b01e2d
MC
5158 if (bmcr & BMCR_ANENABLE) {
5159 u32 phy1, phy2;
b6016b76 5160
48b01e2d
MC
5161 bnx2_write_phy(bp, 0x1c, 0x7c00);
5162 bnx2_read_phy(bp, 0x1c, &phy1);
cea94db9 5163
48b01e2d
MC
5164 bnx2_write_phy(bp, 0x17, 0x0f01);
5165 bnx2_read_phy(bp, 0x15, &phy2);
5166 bnx2_write_phy(bp, 0x17, 0x0f01);
5167 bnx2_read_phy(bp, 0x15, &phy2);
b6016b76 5168
48b01e2d
MC
5169 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5170 !(phy2 & 0x20)) { /* no CONFIG */
5171
5172 bmcr &= ~BMCR_ANENABLE;
5173 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 5174 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
48b01e2d
MC
5175 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5176 }
b6016b76 5177 }
48b01e2d
MC
5178 }
5179 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5180 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5181 u32 phy2;
b6016b76 5182
48b01e2d
MC
5183 bnx2_write_phy(bp, 0x17, 0x0f01);
5184 bnx2_read_phy(bp, 0x15, &phy2);
5185 if (phy2 & 0x20) {
5186 u32 bmcr;
cd339a0e 5187
ca58c3af 5188 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 5189 bmcr |= BMCR_ANENABLE;
ca58c3af 5190 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 5191
48b01e2d
MC
5192 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5193 }
5194 } else
5195 bp->current_interval = bp->timer_interval;
b6016b76 5196
48b01e2d
MC
5197 spin_unlock(&bp->phy_lock);
5198}
b6016b76 5199
f8dd064e
MC
5200static void
5201bnx2_5708_serdes_timer(struct bnx2 *bp)
5202{
0d8a6571
MC
5203 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5204 return;
5205
f8dd064e
MC
5206 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5207 bp->serdes_an_pending = 0;
5208 return;
5209 }
b6016b76 5210
f8dd064e
MC
5211 spin_lock(&bp->phy_lock);
5212 if (bp->serdes_an_pending)
5213 bp->serdes_an_pending--;
5214 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5215 u32 bmcr;
b6016b76 5216
ca58c3af 5217 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 5218 if (bmcr & BMCR_ANENABLE) {
605a9e20 5219 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
5220 bp->current_interval = SERDES_FORCED_TIMEOUT;
5221 } else {
605a9e20 5222 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
5223 bp->serdes_an_pending = 2;
5224 bp->current_interval = bp->timer_interval;
b6016b76 5225 }
b6016b76 5226
f8dd064e
MC
5227 } else
5228 bp->current_interval = bp->timer_interval;
b6016b76 5229
f8dd064e
MC
5230 spin_unlock(&bp->phy_lock);
5231}
5232
48b01e2d
MC
5233static void
5234bnx2_timer(unsigned long data)
5235{
5236 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 5237
48b01e2d
MC
5238 if (!netif_running(bp->dev))
5239 return;
b6016b76 5240
48b01e2d
MC
5241 if (atomic_read(&bp->intr_sem) != 0)
5242 goto bnx2_restart_timer;
b6016b76 5243
df149d70 5244 bnx2_send_heart_beat(bp);
b6016b76 5245
48b01e2d 5246 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 5247
02537b06
MC
5248 /* workaround occasional corrupted counters */
5249 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5250 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5251 BNX2_HC_COMMAND_STATS_NOW);
5252
f8dd064e
MC
5253 if (bp->phy_flags & PHY_SERDES_FLAG) {
5254 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5255 bnx2_5706_serdes_timer(bp);
27a005b8 5256 else
f8dd064e 5257 bnx2_5708_serdes_timer(bp);
b6016b76
MC
5258 }
5259
5260bnx2_restart_timer:
cd339a0e 5261 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5262}
5263
8e6a72c4
MC
5264static int
5265bnx2_request_irq(struct bnx2 *bp)
5266{
5267 struct net_device *dev = bp->dev;
6d866ffc
MC
5268 unsigned long flags;
5269 struct bnx2_irq *irq = &bp->irq_tbl[0];
5270 int rc;
8e6a72c4 5271
6d866ffc
MC
5272 if (bp->flags & USING_MSI_FLAG)
5273 flags = 0;
5274 else
5275 flags = IRQF_SHARED;
5276 rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
8e6a72c4
MC
5277 return rc;
5278}
5279
5280static void
5281bnx2_free_irq(struct bnx2 *bp)
5282{
5283 struct net_device *dev = bp->dev;
5284
6d866ffc 5285 free_irq(bp->irq_tbl[0].vector, dev);
8e6a72c4 5286 if (bp->flags & USING_MSI_FLAG) {
8e6a72c4
MC
5287 pci_disable_msi(bp->pdev);
5288 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
6d866ffc
MC
5289 }
5290}
5291
/* Choose the interrupt mode (MSI vs. legacy INTx) and fill in
 * irq_tbl[0] with the matching handler, name and vector.
 *
 * @dis_msi: non-zero forces INTx even if the chip is MSI-capable
 *           (used as a fallback after a failed MSI self-test).
 *
 * 5709 uses a one-shot MSI handler; other MSI-capable chips use the
 * plain MSI handler.  The vector is always taken from pdev->irq, which
 * pci_enable_msi() updates when MSI is enabled.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);

	if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= ONE_SHOT_MSI_FLAG;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;
		}
	}

	bp->irq_tbl[0].vector = bp->pdev->irq;
}
5311
b6016b76
MC
/* Called with rtnl_lock */
/* net_device open: power up the chip, allocate rings, pick the
 * interrupt mode, bring up NAPI/IRQ and initialize the NIC.
 *
 * If MSI is selected, it is self-tested with bnx2_test_intr(); on some
 * chipsets MSI delivery is broken, in which case the device is torn
 * down and reinitialized in INTx mode.  Each failure path unwinds only
 * what has been set up so far.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_napi_disable(bp);
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Force INTx and reinitialize from scratch. */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_napi_disable(bp);
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5393
/* Deferred reset handler scheduled from bnx2_tx_timeout().
 *
 * Stops the interface, reinitializes the NIC and restarts it.
 * in_reset_task is set for the duration so bnx2_close() can wait for
 * an in-flight reset instead of calling flush_scheduled_work() (which
 * could deadlock on rtnl_lock — see the comment in bnx2_close()).
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Hold off the interrupt handler until netif_start re-enables. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5411
/* net_device tx_timeout hook: defer the chip reset to process context
 * via the reset_task workqueue item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5420
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Attach/detach the VLAN group.  The interface is quiesced while the
 * RX mode is reprogrammed so the chip's VLAN stripping configuration
 * stays consistent with the software state.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5436
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Hard-start transmit: map the skb (head + page fragments) into TX
 * buffer descriptors, set up checksum/VLAN/LSO flags, and ring the
 * doorbell registers.  The ordering of descriptor writes, the producer
 * register writes and mmiowb() is required by the hardware.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	/* The queue should have been stopped before the ring filled up;
	 * hitting this indicates a flow-control bug, not a full ring.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* LSO: encode the TCP header length (and, for IPv6, the
		 * transport header offset) into the descriptor flags.
		 */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* Offset is split across three BD fields. */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 LSO rewrites the headers; the skb header
			 * must be private to this caller first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last descriptor of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when nearly full; re-wake immediately if the
	 * completion path freed enough descriptors in the meantime.
	 */
	if (unlikely(bnx2_tx_avail(bp, bnapi) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp, bnapi) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5577
/* Called with rtnl_lock */
/* net_device stop: quiesce interrupts/NAPI/timer, reset the chip with
 * a firmware message that reflects the wake-on-LAN configuration,
 * release IRQ and memory, and drop the device to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Pick the unload reset code the firmware expects for the
	 * current WoL configuration.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5610
/* Fold a hi/lo split 64-bit firmware counter into an unsigned long.
 * Fully parenthesized so the expansion composes safely inside larger
 * expressions (the previous GET_NET_STATS64 ended in a bare '+',
 * which would mis-associate under, e.g., multiplication).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

/* 32-bit hosts report only the low half of each counter. */
#define GET_NET_STATS32(ctr)	\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
5623
/* net_device get_stats: translate the chip's DMA'ed statistics block
 * into struct net_device_stats.  64-bit hardware counters are folded
 * with GET_NET_STATS (truncated to the low 32 bits on 32-bit hosts).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats block memory is only allocated once the device has been
	 * opened; return the (zeroed/stale) cached stats until then.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter is unreliable on 5706 and 5708 A0
	 * silicon (errata), so report 0 there.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames the on-chip firmware dropped before the host
	 * ever saw them.
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5699
/* All ethtool functions called with rtnl_lock */

/* ethtool get_settings: report supported modes (serdes/copper or both
 * when a remote-PHY-capable firmware is present), the advertised modes,
 * autoneg state, and current speed/duplex if the link is up.
 * phy_lock guards the fields that the link-state machinery updates.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		/* Remote PHY can be either media type. */
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* Link down: speed/duplex unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 5760
b6016b76
MC
/* ethtool set_settings: validate the requested port/autoneg/speed
 * combination against the PHY's capabilities, then commit the request
 * and reprogram the PHY.  All validation happens under phy_lock with a
 * single goto-unwind exit; err stays -EINVAL on any rejected input.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching media is only possible with remote-PHY firmware. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single mode requested: advertise everything
			 * the selected medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5843
/* ethtool get_drvinfo: fill in driver name/version, PCI bus address
 * and firmware version.  NOTE(review): the sources are fixed strings
 * and bp->fw_version — presumably all shorter than the ethtool_drvinfo
 * field sizes, which is why plain strcpy() is used here; confirm
 * bp->fw_version's bound before reusing this pattern.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
5854
244ac4f4
MC
/* Size in bytes of the ethtool register dump produced by
 * bnx2_get_regs().
 */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5862
5863static void
5864bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5865{
5866 u32 *p = _p, i, offset;
5867 u8 *orig_p = _p;
5868 struct bnx2 *bp = netdev_priv(dev);
5869 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5870 0x0800, 0x0880, 0x0c00, 0x0c10,
5871 0x0c30, 0x0d08, 0x1000, 0x101c,
5872 0x1040, 0x1048, 0x1080, 0x10a4,
5873 0x1400, 0x1490, 0x1498, 0x14f0,
5874 0x1500, 0x155c, 0x1580, 0x15dc,
5875 0x1600, 0x1658, 0x1680, 0x16d8,
5876 0x1800, 0x1820, 0x1840, 0x1854,
5877 0x1880, 0x1894, 0x1900, 0x1984,
5878 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5879 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5880 0x2000, 0x2030, 0x23c0, 0x2400,
5881 0x2800, 0x2820, 0x2830, 0x2850,
5882 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5883 0x3c00, 0x3c94, 0x4000, 0x4010,
5884 0x4080, 0x4090, 0x43c0, 0x4458,
5885 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5886 0x4fc0, 0x5010, 0x53c0, 0x5444,
5887 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5888 0x5fc0, 0x6000, 0x6400, 0x6428,
5889 0x6800, 0x6848, 0x684c, 0x6860,
5890 0x6888, 0x6910, 0x8000 };
5891
5892 regs->version = 0;
5893
5894 memset(p, 0, BNX2_REGDUMP_LEN);
5895
5896 if (!netif_running(bp->dev))
5897 return;
5898
5899 i = 0;
5900 offset = reg_boundaries[0];
5901 p += offset;
5902 while (offset < BNX2_REGDUMP_LEN) {
5903 *p++ = REG_RD(bp, offset);
5904 offset += 4;
5905 if (offset == reg_boundaries[i + 1]) {
5906 offset = reg_boundaries[i + 2];
5907 p = (u32 *) (orig_p + offset);
5908 i += 2;
5909 }
5910 }
5911}
5912
b6016b76
MC
5913static void
5914bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5915{
972ec0d4 5916 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5917
5918 if (bp->flags & NO_WOL_FLAG) {
5919 wol->supported = 0;
5920 wol->wolopts = 0;
5921 }
5922 else {
5923 wol->supported = WAKE_MAGIC;
5924 if (bp->wol)
5925 wol->wolopts = WAKE_MAGIC;
5926 else
5927 wol->wolopts = 0;
5928 }
5929 memset(&wol->sopass, 0, sizeof(wol->sopass));
5930}
5931
5932static int
5933bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5934{
972ec0d4 5935 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5936
5937 if (wol->wolopts & ~WAKE_MAGIC)
5938 return -EINVAL;
5939
5940 if (wol->wolopts & WAKE_MAGIC) {
5941 if (bp->flags & NO_WOL_FLAG)
5942 return -EINVAL;
5943
5944 bp->wol = 1;
5945 }
5946 else {
5947 bp->wol = 0;
5948 }
5949 return 0;
5950}
5951
/* ethtool nway_reset: restart autonegotiation.  Only valid when
 * autoneg is enabled.  With remote-PHY firmware the request is
 * delegated to bnx2_setup_remote_phy().  On serdes PHYs the link is
 * first forced down (loopback) so the partner notices the restart;
 * phy_lock is dropped across the 20 ms settle delay.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Let the periodic timer supervise the serdes AN. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5994
5995static int
5996bnx2_get_eeprom_len(struct net_device *dev)
5997{
972ec0d4 5998 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5999
1122db71 6000 if (bp->flash_info == NULL)
b6016b76
MC
6001 return 0;
6002
1122db71 6003 return (int) bp->flash_size;
b6016b76
MC
6004}
6005
6006static int
6007bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6008 u8 *eebuf)
6009{
972ec0d4 6010 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6011 int rc;
6012
1064e944 6013 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6014
6015 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6016
6017 return rc;
6018}
6019
6020static int
6021bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6022 u8 *eebuf)
6023{
972ec0d4 6024 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6025 int rc;
6026
1064e944 6027 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6028
6029 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6030
6031 return rc;
6032}
6033
/* ethtool get_coalesce: report the cached interrupt-coalescing
 * parameters (ticks = microseconds, trips = frame counts) for both
 * normal and in-interrupt paths, plus the stats block update period.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
6055
/* ethtool set_coalesce: clamp each requested value to the hardware
 * field width (tick fields 10 bits, frame-trip fields 8 bits), apply
 * chip-specific constraints, and restart the NIC if it is running so
 * the host coalescing block is reprogrammed.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* 5708 workaround: stats must either be off or updated once a
	 * second (see the corrupted-counter workaround in bnx2_timer).
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
6104
/* ethtool get_ringparam: report RX/TX ring size limits and current
 * sizes; the RX page ring is exposed through the "jumbo" fields.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
6121
/* Resize the RX and TX rings.  If the interface is running it is
 * stopped, the chip is reset and all ring memory is released before
 * the new sizes are recorded and everything is reallocated/restarted.
 *
 * Returns 0 on success or a negative errno from reallocation; on
 * allocation failure the device is left down with the new sizes set.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
6146
5d5d0015
MC
6147static int
6148bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6149{
6150 struct bnx2 *bp = netdev_priv(dev);
6151 int rc;
6152
6153 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6154 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6155 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6156
6157 return -EINVAL;
6158 }
6159 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6160 return rc;
6161}
6162
b6016b76
MC
6163static void
6164bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6165{
972ec0d4 6166 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6167
6168 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6169 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6170 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6171}
6172
/* ethtool set_pauseparam: record the requested RX/TX pause and
 * flow-control autoneg settings, then reprogram the PHY under
 * phy_lock so the new configuration takes effect.
 */
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6199
6200static u32
6201bnx2_get_rx_csum(struct net_device *dev)
6202{
972ec0d4 6203 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6204
6205 return bp->rx_csum;
6206}
6207
6208static int
6209bnx2_set_rx_csum(struct net_device *dev, u32 data)
6210{
972ec0d4 6211 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6212
6213 bp->rx_csum = data;
6214 return 0;
6215}
6216
/* ethtool set_tso: toggle TCP segmentation offload feature bits.
 * TSO over IPv6 is only offered on the 5709, the only chip in this
 * family that supports it.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
6231
/* Number of ethtool statistics.  The string, offset, and length
 * arrays below are indexed in lockstep and must all have exactly this
 * many entries.
 */
#define BNX2_NUM_STATS 46

/* ETH_SS_STATS string names, one per exported counter. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6284
/* 32-bit word offset of a counter inside struct statistics_block.
 * 64-bit counters are addressed by their _hi word; the matching
 * length array says how many bytes to read at each offset.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Per-statistic source offsets, same order as bnx2_stats_str_arr. */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6335
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-statistic read width in bytes on the 5706 (8 = 64-bit counter,
 * 4 = 32-bit counter, 0 = counter skipped because of errata).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6346
5b0c76ad
MC
6347static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6348 8,0,8,8,8,8,8,8,8,8,
6349 4,4,4,4,4,4,4,4,4,4,
6350 4,4,4,4,4,4,4,4,4,4,
6351 4,4,4,4,4,4,4,4,4,4,
cea94db9 6352 4,4,4,4,4,4,
5b0c76ad
MC
6353};
6354
b6016b76
MC
6355#define BNX2_NUM_TESTS 6
6356
14ab9b86 6357static struct {
b6016b76
MC
6358 char string[ETH_GSTRING_LEN];
6359} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6360 { "register_test (offline)" },
6361 { "memory_test (offline)" },
6362 { "loopback_test (offline)" },
6363 { "nvram_test (online)" },
6364 { "interrupt_test (online)" },
6365 { "link_test (online)" },
6366};
6367
6368static int
b9f2c044 6369bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 6370{
b9f2c044
JG
6371 switch (sset) {
6372 case ETH_SS_TEST:
6373 return BNX2_NUM_TESTS;
6374 case ETH_SS_STATS:
6375 return BNX2_NUM_STATS;
6376 default:
6377 return -EOPNOTSUPP;
6378 }
b6016b76
MC
6379}
6380
/* ethtool .self_test handler.  Runs the offline tests (register, memory,
 * loopback) only when ETH_TEST_FL_OFFLINE is requested — those require a
 * full chip reset — then always runs the online tests (nvram, interrupt,
 * link).  Each failing test sets buf[i] non-zero and flags the whole run
 * as failed; buf[] indices match bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the hardware:
		 * quiesce traffic, put the chip in diagnostic mode and
		 * drop all queued skbs first.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is a bitmask of failed loopback modes;
		 * report it directly in buf[2].
		 */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or just reset if the device
		 * was not up to begin with).
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6436
6437static void
6438bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6439{
6440 switch (stringset) {
6441 case ETH_SS_STATS:
6442 memcpy(buf, bnx2_stats_str_arr,
6443 sizeof(bnx2_stats_str_arr));
6444 break;
6445 case ETH_SS_TEST:
6446 memcpy(buf, bnx2_tests_str_arr,
6447 sizeof(bnx2_tests_str_arr));
6448 break;
6449 }
6450}
6451
b6016b76
MC
/* ethtool .get_ethtool_stats: read BNX2_NUM_STATS counters out of the
 * DMA'd hardware statistics block into buf[].  Counter width per index
 * comes from the chip-specific length table (0 = skip due to errata,
 * 4 = one 32-bit word, 8 = two words combined into a 64-bit value).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet (device never opened): report zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early 5706/5708 steppings use the table with extra skipped
	 * counters; everything else uses the 5708 table.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: first word is the high half, the
		 * following word the low half.
		 */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
6492
/* ethtool .phys_id: blink the port LED `data` times (default 2) so the
 * physical adapter can be located.  Saves and restores BNX2_MISC_CFG
 * around the blink loop; each on/off phase lasts 500 ms and the loop
 * aborts early if the caller is interrupted by a signal.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Two iterations per blink: even = all LEDs forced off via bare
	 * override, odd = all speed/traffic LEDs forced on.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	/* Return LED control to the hardware and restore MISC_CFG. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
6526
4666f87a
MC
6527static int
6528bnx2_set_tx_csum(struct net_device *dev, u32 data)
6529{
6530 struct bnx2 *bp = netdev_priv(dev);
6531
6532 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 6533 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
6534 else
6535 return (ethtool_op_set_tx_csum(dev, data));
6536}
6537
/* ethtool method table for bnx2 devices.  Operations not listed here use
 * the ethtool core defaults.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6568
6569/* Called with rtnl_lock */
/* Called with rtnl_lock */
/* Standard MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * PHY register access is refused when the PHY is managed remotely by
 * firmware (REMOTE_PHY_CAP_FLAG) or when the interface is down, and is
 * serialized against the rest of the driver by bp->phy_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6622
6623/* Called with rtnl_lock */
6624static int
6625bnx2_change_mac_addr(struct net_device *dev, void *p)
6626{
6627 struct sockaddr *addr = p;
972ec0d4 6628 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6629
73eef4cd
MC
6630 if (!is_valid_ether_addr(addr->sa_data))
6631 return -EINVAL;
6632
b6016b76
MC
6633 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6634 if (netif_running(dev))
6635 bnx2_set_mac_addr(bp);
6636
6637 return 0;
6638}
6639
6640/* Called with rtnl_lock */
6641static int
6642bnx2_change_mtu(struct net_device *dev, int new_mtu)
6643{
972ec0d4 6644 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6645
6646 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6647 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6648 return -EINVAL;
6649
6650 dev->mtu = new_mtu;
5d5d0015 6651 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
6652}
6653
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler synchronously with the device
 * IRQ masked, so netconsole/kgdb-over-ethernet can make progress even
 * when interrupts are unavailable.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6665
253c8b75
MC
/* Probe-time media detection for the 5709: decide from the dual-media
 * control register whether this port is SerDes (fiber) and set
 * PHY_SERDES_FLAG accordingly.  Copper parts return with no flag set.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond ID alone decides for single-media parts: C = copper,
	 * S = SerDes.
	 */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		return;
	}

	/* Dual-media part: read the strap setting, either the software
	 * override field or the hardware strap field.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values that mean SerDes differ between PCI function 0
	 * and the other functions.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= PHY_SERDES_FLAG;
			return;
		}
	}
}
6703
883e5151
MC
/* Probe-time detection of the conventional PCI/PCI-X bus parameters:
 * sets PCIX_FLAG, bus_speed_mhz and PCI_32BIT_FLAG from the chip's
 * PCICFG status/clock registers.  Not used for PCI Express devices.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		/* PCI-X: the detected clock speed is encoded in the
		 * clock-control register; map it to MHz.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only the M66EN strap distinguishes
		 * 66 MHz from 33 MHz.
		 */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6755
b6016b76
MC
/* One-time board initialization called from bnx2_init_one() at probe
 * time: enables and maps the PCI device, detects chip/bus capabilities,
 * reads MAC address and firmware version from shared memory, and fills
 * in the driver defaults in *bp.  Returns 0 or a negative errno; on
 * failure all resources acquired so far are released via the goto chain.
 */
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	/* Map BAR 0 up to and including the TX TSS context block. */
	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	/* 5709 is PCI Express; everything else must be PCI-X capable. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= PCIE_FLAG;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting.\n");
			rc = -EIO;
			goto err_out_unmap;
		}
	}

	/* MSI is usable only on steppings after 5706 A1. */
	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= MSI_CAP_FLAG;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
	else
		persist_dma_mask = dma_mask = DMA_64BIT_MASK;

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & PCIE_FLAG))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	/* Locate the shared-memory window: newer firmware publishes a
	 * per-function base via the SHM header, otherwise use the fixed
	 * host-view base.
	 */
	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	/* Decode the bootcode version (three packed bytes) into a
	 * dotted-decimal string in bp->fw_version, skipping leading zeros.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
	for (i = 0, j = 0; i < 3; i++) {
		u8 num, k, skip0;

		num = (u8) (reg >> (24 - (i * 8)));
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= ASF_ENABLE_FLAG;

		/* Wait up to ~300 ms for the management firmware to come up. */
		for (i = 0; i < 30; i++) {
			reg = REG_RD_IND(bp, bp->shmem_base +
					     BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	/* If management firmware is running, append its version string
	 * (three byte-swapped words) to bp->fw_version.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		int i;
		u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);

		bp->fw_version[j++] = ' ';
		for (i = 0; i < 3; i++) {
			reg = REG_RD_IND(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	/* Permanent MAC address lives in two shared-memory words. */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	/* Default ring sizes and coalescing parameters. */
	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->timer_interval =  HZ;
	bp->current_interval =  HZ;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= PHY_SERDES_FLAG;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->phy_port = PORT_FIBRE;
		reg = REG_RD_IND(bp, bp->shmem_base +
				     BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= NO_WOL_FLAG;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
		bnx2_init_remote_phy(bp);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= PHY_CRC_FIX_FLAG;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;

	/* Early 5708 steppings cannot do WOL. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		bp->flags |= NO_WOL_FLAG;
		bp->wol = 0;
	}

	/* 5706 A0: use the same coalescing values during interrupt as
	 * the normal ones.
	 */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	/* Set up (but do not start) the periodic driver timer. */
	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
7097
883e5151
MC
7098static char * __devinit
7099bnx2_bus_string(struct bnx2 *bp, char *str)
7100{
7101 char *s = str;
7102
7103 if (bp->flags & PCIE_FLAG) {
7104 s += sprintf(s, "PCI Express");
7105 } else {
7106 s += sprintf(s, "PCI");
7107 if (bp->flags & PCIX_FLAG)
7108 s += sprintf(s, "-X");
7109 if (bp->flags & PCI_32BIT_FLAG)
7110 s += sprintf(s, " 32-bit");
7111 else
7112 s += sprintf(s, " 64-bit");
7113 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7114 }
7115 return str;
7116}
7117
35efa7c1
MC
7118static int __devinit
7119bnx2_init_napi(struct bnx2 *bp)
7120{
7121 struct bnx2_napi *bnapi = &bp->bnx2_napi;
7122
7123 bnapi->bp = bp;
7124 netif_napi_add(bp->dev, &bnapi->napi, bnx2_poll, 64);
7125}
7126
b6016b76
MC
/* PCI probe entry point: allocate the net_device, run board init, wire
 * up the netdev/ethtool operations and feature flags, register the
 * device and print a one-line summary.  Returns 0 or a negative errno;
 * on failure everything acquired by bnx2_init_board() is torn down.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];
	DECLARE_MAC_BUF(mac);

	/* Print the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* MAC address was read from shared memory in bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* 5709 additionally offloads IPv6 checksums and TSO6. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %s\n",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, print_mac(mac, dev->dev_addr));

	return 0;
}
7214
/* PCI remove entry point: undo bnx2_init_one() in reverse order.  Any
 * queued reset work is flushed before the netdev is unregistered.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7233
/* PCI suspend handler: save PCI config state, and if the interface is
 * up, quiesce the device, tell the firmware why we are going down (link
 * down / WOL / no WOL), free buffers and drop to the requested PCI
 * power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the reset code reported to the firmware based on WOL
	 * capability and configuration.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7264
/* PCI resume handler: restore PCI config state and, if the interface
 * was up at suspend time, bring the chip back to D0 and reinitialize it.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
7281
/* PCI driver glue: probe/remove and power-management entry points,
 * matched against the device table in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
7290
/* Module load: register the PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

/* Module unload: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
7303
7304
7305
This page took 0.817657 seconds and 5 git commands to generate.