[BNX2]: Introduce new bnx2_napi structure.
[deliverable/linux.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76 54
110d0ef9 55#define FW_BUF_SIZE 0x10000
b3448b0b 56
b6016b76
MC
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
a0d142c6
MC
59#define DRV_MODULE_VERSION "1.7.0"
60#define DRV_MODULE_RELDATE "December 11, 2007"
b6016b76
MC
61
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
e19360f2 67static const char version[] __devinitdata =
b6016b76
MC
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 71MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
72MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
5b0c76ad
MC
86 BCM5708,
87 BCM5708S,
bac0dff6 88 BCM5709,
27a005b8 89 BCM5709S,
b6016b76
MC
90} board_t;
91
92/* indexed by board_t, above */
f71e1309 93static const struct {
b6016b76
MC
94 char *name;
95} board_info[] __devinitdata = {
96 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
97 { "HP NC370T Multifunction Gigabit Server Adapter" },
98 { "HP NC370i Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
100 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
101 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 103 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 104 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
b6016b76
MC
105 };
106
107static struct pci_device_id bnx2_pci_tbl[] = {
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
b6016b76
MC
126 { 0, }
127};
128
129static struct flash_spec flash_table[] =
130{
e30372c9
MC
131#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
132#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 133 /* Slow EEPROM */
37137709 134 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 135 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
136 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
137 "EEPROM - slow"},
37137709
MC
138 /* Expansion entry 0001 */
139 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 140 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
141 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
142 "Entry 0001"},
b6016b76
MC
143 /* Saifun SA25F010 (non-buffered flash) */
144 /* strap, cfg1, & write1 need updates */
37137709 145 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 146 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
147 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
148 "Non-buffered flash (128kB)"},
149 /* Saifun SA25F020 (non-buffered flash) */
150 /* strap, cfg1, & write1 need updates */
37137709 151 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 152 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
153 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
154 "Non-buffered flash (256kB)"},
37137709
MC
155 /* Expansion entry 0100 */
156 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 157 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
158 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
159 "Entry 0100"},
160 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 161 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 162 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
163 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
164 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
165 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
166 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 167 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
168 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
169 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
170 /* Saifun SA25F005 (non-buffered flash) */
171 /* strap, cfg1, & write1 need updates */
172 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 173 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
174 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
175 "Non-buffered flash (64kB)"},
176 /* Fast EEPROM */
177 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 178 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
179 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
180 "EEPROM - fast"},
181 /* Expansion entry 1001 */
182 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 183 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
184 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
185 "Entry 1001"},
186 /* Expansion entry 1010 */
187 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1010"},
191 /* ATMEL AT45DB011B (buffered flash) */
192 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 193 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
194 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
195 "Buffered flash (128kB)"},
196 /* Expansion entry 1100 */
197 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1100"},
201 /* Expansion entry 1101 */
202 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1101"},
 206 /* Atmel Expansion entry 1110 */
207 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
209 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1110 (Atmel)"},
211 /* ATMEL AT45DB021B (buffered flash) */
212 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
215 "Buffered flash (256kB)"},
b6016b76
MC
216};
217
e30372c9
MC
218static struct flash_spec flash_5709 = {
219 .flags = BNX2_NV_BUFFERED,
220 .page_bits = BCM5709_FLASH_PAGE_BITS,
221 .page_size = BCM5709_FLASH_PAGE_SIZE,
222 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
223 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
224 .name = "5709 Buffered flash (256kB)",
225};
226
b6016b76
MC
227MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
e89bbf10
MC
/* Return the number of free TX descriptors in the ring.
 *
 * Lockless: tx_prod is advanced by the xmit path and tx_cons by the
 * completion path, so the ordering of the reads matters — do not
 * restructure without re-checking the memory-barrier pairing.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	/* Pairs with the barrier on the producer/consumer update side so we
	 * see a consistent tx_prod/tx_cons snapshot.
	 */
	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		/* Indices are 16-bit; mask the wrap and account for the
		 * skipped entry.
		 */
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
246
b6016b76
MC
247static u32
248bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
249{
1b8227c4
MC
250 u32 val;
251
252 spin_lock_bh(&bp->indirect_lock);
b6016b76 253 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
254 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
255 spin_unlock_bh(&bp->indirect_lock);
256 return val;
b6016b76
MC
257}
258
/* Write a register through the PCICFG indirect access window.
 * Serialized against bnx2_reg_rd_ind()/bnx2_ctx_wr() by indirect_lock
 * since the address/data window registers are shared.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268static void
269bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270{
271 offset += cid_addr;
1b8227c4 272 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
1b8227c4 290 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
291}
292
/* Read PHY register `reg` over MDIO into *val.
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction does not complete
 * within the polling budget (50 * 10us); on failure *val is set to 0.
 *
 * If hardware auto-polling of the PHY is enabled, it is temporarily
 * switched off around the manual transaction and restored afterwards —
 * both paths must stay symmetric.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* read back to flush the posted write before the delay */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose the MDIO read command: PHY address, register, and the
	 * START_BUSY bit which kicks off the transaction.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* re-read to latch the returned data bits */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* still busy after the full polling budget — give up */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* restore hardware auto-polling disabled above */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
349
/* Write `val` to PHY register `reg` over MDIO.
 *
 * Returns 0 on success, -EBUSY if the transaction does not complete
 * within 50 * 10us.  Mirrors bnx2_read_phy(): hardware PHY auto-polling
 * is suspended around the manual transaction and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* read back to flush the posted write before the delay */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose the MDIO write command; START_BUSY kicks it off and
	 * clears when the transaction finishes.
	 */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* restore hardware auto-polling disabled above */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
398
/* Mask device interrupts.  The read-back flushes the posted write so the
 * mask is guaranteed to have reached the chip before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask device interrupts.
 *
 * Two INT_ACK_CMD writes: the first acknowledges the last seen status
 * index while still masked, the second drops the mask.  The final
 * HC_COMMAND write sets COAL_NOW, which — per the register name — asks
 * the host coalescing block to generate an interrupt immediately if
 * events are already pending (NOTE(review): confirm against chip docs).
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bnapi->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bnapi->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
421
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so bnx2_netif_start() knows a restart is
 * pending; synchronize_irq() blocks until the IRQ handler is not running.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
429
35efa7c1
MC
/* Disable the (single) NAPI context; waits for any poll in progress. */
static void
bnx2_napi_disable(struct bnx2 *bp)
{
	napi_disable(&bp->bnx2_napi.napi);
}
435
/* Re-enable the (single) NAPI context. */
static void
bnx2_napi_enable(struct bnx2 *bp)
{
	napi_enable(&bp->bnx2_napi.napi);
}
441
b6016b76
MC
/* Quiesce the interface: mask IRQs synchronously, stop NAPI polling and
 * the TX queue.  trans_start is refreshed so the watchdog does not
 * misread the deliberate stall as a TX timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
452
/* Undo bnx2_netif_stop().  intr_sem counts nested stops; only the last
 * matching start (when the counter drops to zero) actually re-enables
 * the TX queue, NAPI, and interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
464
465static void
466bnx2_free_mem(struct bnx2 *bp)
467{
13daffa2
MC
468 int i;
469
59b47d8a
MC
470 for (i = 0; i < bp->ctx_pages; i++) {
471 if (bp->ctx_blk[i]) {
472 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
473 bp->ctx_blk[i],
474 bp->ctx_blk_mapping[i]);
475 bp->ctx_blk[i] = NULL;
476 }
477 }
b6016b76 478 if (bp->status_blk) {
0f31f994 479 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
480 bp->status_blk, bp->status_blk_mapping);
481 bp->status_blk = NULL;
0f31f994 482 bp->stats_blk = NULL;
b6016b76
MC
483 }
484 if (bp->tx_desc_ring) {
e343d55c 485 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
b6016b76
MC
486 bp->tx_desc_ring, bp->tx_desc_mapping);
487 bp->tx_desc_ring = NULL;
488 }
b4558ea9
JJ
489 kfree(bp->tx_buf_ring);
490 bp->tx_buf_ring = NULL;
13daffa2
MC
491 for (i = 0; i < bp->rx_max_ring; i++) {
492 if (bp->rx_desc_ring[i])
e343d55c 493 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
13daffa2
MC
494 bp->rx_desc_ring[i],
495 bp->rx_desc_mapping[i]);
496 bp->rx_desc_ring[i] = NULL;
497 }
498 vfree(bp->rx_buf_ring);
b4558ea9 499 bp->rx_buf_ring = NULL;
47bf4246
MC
500 for (i = 0; i < bp->rx_max_pg_ring; i++) {
501 if (bp->rx_pg_desc_ring[i])
502 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
503 bp->rx_pg_desc_ring[i],
504 bp->rx_pg_desc_mapping[i]);
505 bp->rx_pg_desc_ring[i] = NULL;
506 }
507 if (bp->rx_pg_ring)
508 vfree(bp->rx_pg_ring);
509 bp->rx_pg_ring = NULL;
b6016b76
MC
510}
511
/* Allocate all rings, the status/statistics block, and (5709 only) the
 * context memory pages.
 *
 * Returns 0 on success, -ENOMEM on any failure.  Error handling follows
 * the kernel goto-cleanup idiom: any partial allocation is unwound by
 * bnx2_free_mem(), which tolerates partially filled state.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* software-side RX ring can be large, so vmalloc rather than kmalloc */
	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* page ring is only used when rx_pg_ring_size is configured */
	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* the NAPI context polls this status block */
	bp->bnx2_napi.status_blk = bp->status_blk;

	/* stats block lives right after the cache-aligned status block */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB of context memory, split into BCM_PAGE_SIZE chunks */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
597
e3648b3d
MC
/* Report the current link state to the bootcode/firmware via the shared
 * memory LINK_STATUS word.  Skipped entirely when the PHY is managed
 * remotely (REMOTE_PHY_CAP_FLAG) — the firmware already knows.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* encode speed + duplex into the firmware's status format */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice to get current state */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
656
9b1084b8
MC
657static char *
658bnx2_xceiver_str(struct bnx2 *bp)
659{
660 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
661 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
662 "Copper"));
663}
664
b6016b76
MC
665static void
666bnx2_report_link(struct bnx2 *bp)
667{
668 if (bp->link_up) {
669 netif_carrier_on(bp->dev);
9b1084b8
MC
670 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
671 bnx2_xceiver_str(bp));
b6016b76
MC
672
673 printk("%d Mbps ", bp->line_speed);
674
675 if (bp->duplex == DUPLEX_FULL)
676 printk("full duplex");
677 else
678 printk("half duplex");
679
680 if (bp->flow_ctrl) {
681 if (bp->flow_ctrl & FLOW_CTRL_RX) {
682 printk(", receive ");
683 if (bp->flow_ctrl & FLOW_CTRL_TX)
684 printk("& transmit ");
685 }
686 else {
687 printk(", transmit ");
688 }
689 printk("flow control ON");
690 }
691 printk("\n");
692 }
693 else {
694 netif_carrier_off(bp->dev);
9b1084b8
MC
695 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
696 bnx2_xceiver_str(bp));
b6016b76 697 }
e3648b3d
MC
698
699 bnx2_report_fw_link(bp);
b6016b76
MC
700}
701
/* Resolve the negotiated pause (flow control) configuration into
 * bp->flow_ctrl (FLOW_CTRL_TX/RX bits).
 *
 * If speed or flow control is not being autonegotiated, the requested
 * setting is applied directly (full duplex only).  Otherwise the local
 * and partner advertisements are combined per the standard pause
 * resolution table; 5708 SerDes reports the resolved result directly in
 * a status register instead.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* pause is only meaningful at full duplex */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes: hardware already resolved pause for us */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* translate 1000Base-X pause bits to the copper encoding
		 * so one resolution table below handles both media
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
777
27a005b8
MC
/* Record link-up parameters for the 5709 SerDes PHY.
 * Reads the negotiated speed/duplex from the GP_STATUS block (the block
 * address register must be restored to COMBO_IEEEB0 afterwards).
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		/* forced mode: trust the requested settings */
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
816
/* Record link-up parameters for the 5708 SerDes PHY by decoding the
 * 1000X_STAT1 status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
845
/* Record link-up parameters for the 5706 SerDes PHY.
 * SerDes on this chip is always 1000 Mbps; only the duplex has to be
 * determined — forced from BMCR, or from the common advertisement bits
 * when autoneg is enabled.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		/* forced mode: BMCR already told us the duplex */
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* duplex is whatever both ends advertised in common */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
882
/* Record link-up parameters for a copper PHY.
 *
 * With autoneg enabled, 1000 Mbps is resolved first from the
 * CTRL1000/STAT1000 pair (partner bits are shifted down 2 so the two
 * register layouts line up), then 100/10 from ADV/LPA.  With autoneg
 * disabled, speed/duplex are decoded directly from BMCR.
 * Always returns 0, but may clear bp->link_up if nothing matches.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* align STAT1000 partner bits with CTRL1000 layout */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* no common ability — treat as link down */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
948
/* Program the EMAC to match the resolved link parameters
 * (bp->line_speed, bp->duplex, bp->flow_ctrl).  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		/* different TX length/IPG setting for 1G half duplex
		 * (magic values — see chip documentation)
		 */
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
1015
27a005b8
MC
1016static void
1017bnx2_enable_bmsr1(struct bnx2 *bp)
1018{
1019 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1020 (CHIP_NUM(bp) == CHIP_NUM_5709))
1021 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1022 MII_BNX2_BLK_ADDR_GP_STATUS);
1023}
1024
1025static void
1026bnx2_disable_bmsr1(struct bnx2 *bp)
1027{
1028 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1029 (CHIP_NUM(bp) == CHIP_NUM_5709))
1030 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1031 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1032}
1033
605a9e20
MC
/* Enable 2.5G advertisement in the PHY's UP1 register if not already set.
 *
 * Returns 1 if 2.5G was already enabled (or the PHY isn't 2.5G capable
 * returns 0 early), 0 if this call had to turn it on.  On 5709 the
 * OVER1G block must be selected around the UP1 access and the block
 * address restored afterwards.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1062
/* Disable 2.5G advertisement in the PHY's UP1 register if currently set.
 *
 * Returns 1 if this call had to turn it off, 0 if it was already off or
 * the PHY isn't 2.5G capable.  Mirrors bnx2_test_and_enable_2g5(),
 * including the 5709 OVER1G block-address dance.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1088
1089static void
1090bnx2_enable_forced_2g5(struct bnx2 *bp)
1091{
1092 u32 bmcr;
1093
1094 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1095 return;
1096
27a005b8
MC
1097 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1098 u32 val;
1099
1100 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1101 MII_BNX2_BLK_ADDR_SERDES_DIG);
1102 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1103 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1104 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1105 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1106
1107 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1108 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1109 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1110
1111 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1112 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1113 bmcr |= BCM5708S_BMCR_FORCE_2500;
1114 }
1115
1116 if (bp->autoneg & AUTONEG_SPEED) {
1117 bmcr &= ~BMCR_ANENABLE;
1118 if (bp->req_duplex == DUPLEX_FULL)
1119 bmcr |= BMCR_FULLDPLX;
1120 }
1121 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1122}
1123
1124static void
1125bnx2_disable_forced_2g5(struct bnx2 *bp)
1126{
1127 u32 bmcr;
1128
1129 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1130 return;
1131
27a005b8
MC
1132 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1133 u32 val;
1134
1135 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1136 MII_BNX2_BLK_ADDR_SERDES_DIG);
1137 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1138 val &= ~MII_BNX2_SD_MISC1_FORCE;
1139 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1140
1141 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1142 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1143 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1144
1145 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1146 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1147 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1148 }
1149
1150 if (bp->autoneg & AUTONEG_SPEED)
1151 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1152 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1153}
1154
b6016b76
MC
1155static int
1156bnx2_set_link(struct bnx2 *bp)
1157{
1158 u32 bmsr;
1159 u8 link_up;
1160
80be4434 1161 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
1162 bp->link_up = 1;
1163 return 0;
1164 }
1165
0d8a6571
MC
1166 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1167 return 0;
1168
b6016b76
MC
1169 link_up = bp->link_up;
1170
27a005b8
MC
1171 bnx2_enable_bmsr1(bp);
1172 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1173 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1174 bnx2_disable_bmsr1(bp);
b6016b76
MC
1175
1176 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1177 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1178 u32 val;
1179
1180 val = REG_RD(bp, BNX2_EMAC_STATUS);
1181 if (val & BNX2_EMAC_STATUS_LINK)
1182 bmsr |= BMSR_LSTATUS;
1183 else
1184 bmsr &= ~BMSR_LSTATUS;
1185 }
1186
1187 if (bmsr & BMSR_LSTATUS) {
1188 bp->link_up = 1;
1189
1190 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1191 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1192 bnx2_5706s_linkup(bp);
1193 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1194 bnx2_5708s_linkup(bp);
27a005b8
MC
1195 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1196 bnx2_5709s_linkup(bp);
b6016b76
MC
1197 }
1198 else {
1199 bnx2_copper_linkup(bp);
1200 }
1201 bnx2_resolve_flow_ctrl(bp);
1202 }
1203 else {
1204 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
605a9e20
MC
1205 (bp->autoneg & AUTONEG_SPEED))
1206 bnx2_disable_forced_2g5(bp);
b6016b76 1207
b6016b76
MC
1208 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1209 bp->link_up = 0;
1210 }
1211
1212 if (bp->link_up != link_up) {
1213 bnx2_report_link(bp);
1214 }
1215
1216 bnx2_set_mac_link(bp);
1217
1218 return 0;
1219}
1220
1221static int
1222bnx2_reset_phy(struct bnx2 *bp)
1223{
1224 int i;
1225 u32 reg;
1226
ca58c3af 1227 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1228
1229#define PHY_RESET_MAX_WAIT 100
1230 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1231 udelay(10);
1232
ca58c3af 1233 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1234 if (!(reg & BMCR_RESET)) {
1235 udelay(20);
1236 break;
1237 }
1238 }
1239 if (i == PHY_RESET_MAX_WAIT) {
1240 return -EBUSY;
1241 }
1242 return 0;
1243}
1244
1245static u32
1246bnx2_phy_get_pause_adv(struct bnx2 *bp)
1247{
1248 u32 adv = 0;
1249
1250 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1251 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1252
1253 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254 adv = ADVERTISE_1000XPAUSE;
1255 }
1256 else {
1257 adv = ADVERTISE_PAUSE_CAP;
1258 }
1259 }
1260 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1261 if (bp->phy_flags & PHY_SERDES_FLAG) {
1262 adv = ADVERTISE_1000XPSE_ASYM;
1263 }
1264 else {
1265 adv = ADVERTISE_PAUSE_ASYM;
1266 }
1267 }
1268 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1269 if (bp->phy_flags & PHY_SERDES_FLAG) {
1270 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1271 }
1272 else {
1273 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1274 }
1275 }
1276 return adv;
1277}
1278
0d8a6571
MC
1279static int bnx2_fw_sync(struct bnx2 *, u32, int);
1280
b6016b76 1281static int
0d8a6571
MC
1282bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1283{
1284 u32 speed_arg = 0, pause_adv;
1285
1286 pause_adv = bnx2_phy_get_pause_adv(bp);
1287
1288 if (bp->autoneg & AUTONEG_SPEED) {
1289 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1290 if (bp->advertising & ADVERTISED_10baseT_Half)
1291 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1292 if (bp->advertising & ADVERTISED_10baseT_Full)
1293 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1294 if (bp->advertising & ADVERTISED_100baseT_Half)
1295 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296 if (bp->advertising & ADVERTISED_100baseT_Full)
1297 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1298 if (bp->advertising & ADVERTISED_1000baseT_Full)
1299 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1300 if (bp->advertising & ADVERTISED_2500baseX_Full)
1301 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1302 } else {
1303 if (bp->req_line_speed == SPEED_2500)
1304 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1305 else if (bp->req_line_speed == SPEED_1000)
1306 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1307 else if (bp->req_line_speed == SPEED_100) {
1308 if (bp->req_duplex == DUPLEX_FULL)
1309 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1310 else
1311 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1312 } else if (bp->req_line_speed == SPEED_10) {
1313 if (bp->req_duplex == DUPLEX_FULL)
1314 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1315 else
1316 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1317 }
1318 }
1319
1320 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1321 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1322 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1323 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1324
1325 if (port == PORT_TP)
1326 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1327 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1328
1329 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1330
1331 spin_unlock_bh(&bp->phy_lock);
1332 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1333 spin_lock_bh(&bp->phy_lock);
1334
1335 return 0;
1336}
1337
1338static int
1339bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
b6016b76 1340{
605a9e20 1341 u32 adv, bmcr;
b6016b76
MC
1342 u32 new_adv = 0;
1343
0d8a6571
MC
1344 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1345 return (bnx2_setup_remote_phy(bp, port));
1346
b6016b76
MC
1347 if (!(bp->autoneg & AUTONEG_SPEED)) {
1348 u32 new_bmcr;
5b0c76ad
MC
1349 int force_link_down = 0;
1350
605a9e20
MC
1351 if (bp->req_line_speed == SPEED_2500) {
1352 if (!bnx2_test_and_enable_2g5(bp))
1353 force_link_down = 1;
1354 } else if (bp->req_line_speed == SPEED_1000) {
1355 if (bnx2_test_and_disable_2g5(bp))
1356 force_link_down = 1;
1357 }
ca58c3af 1358 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1359 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1360
ca58c3af 1361 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1362 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1363 new_bmcr |= BMCR_SPEED1000;
605a9e20 1364
27a005b8
MC
1365 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1366 if (bp->req_line_speed == SPEED_2500)
1367 bnx2_enable_forced_2g5(bp);
1368 else if (bp->req_line_speed == SPEED_1000) {
1369 bnx2_disable_forced_2g5(bp);
1370 new_bmcr &= ~0x2000;
1371 }
1372
1373 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1374 if (bp->req_line_speed == SPEED_2500)
1375 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1376 else
1377 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1378 }
1379
b6016b76 1380 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1381 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1382 new_bmcr |= BMCR_FULLDPLX;
1383 }
1384 else {
5b0c76ad 1385 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1386 new_bmcr &= ~BMCR_FULLDPLX;
1387 }
5b0c76ad 1388 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1389 /* Force a link down visible on the other side */
1390 if (bp->link_up) {
ca58c3af 1391 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1392 ~(ADVERTISE_1000XFULL |
1393 ADVERTISE_1000XHALF));
ca58c3af 1394 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1395 BMCR_ANRESTART | BMCR_ANENABLE);
1396
1397 bp->link_up = 0;
1398 netif_carrier_off(bp->dev);
ca58c3af 1399 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1400 bnx2_report_link(bp);
b6016b76 1401 }
ca58c3af
MC
1402 bnx2_write_phy(bp, bp->mii_adv, adv);
1403 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1404 } else {
1405 bnx2_resolve_flow_ctrl(bp);
1406 bnx2_set_mac_link(bp);
b6016b76
MC
1407 }
1408 return 0;
1409 }
1410
605a9e20 1411 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1412
b6016b76
MC
1413 if (bp->advertising & ADVERTISED_1000baseT_Full)
1414 new_adv |= ADVERTISE_1000XFULL;
1415
1416 new_adv |= bnx2_phy_get_pause_adv(bp);
1417
ca58c3af
MC
1418 bnx2_read_phy(bp, bp->mii_adv, &adv);
1419 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1420
1421 bp->serdes_an_pending = 0;
1422 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1423 /* Force a link down visible on the other side */
1424 if (bp->link_up) {
ca58c3af 1425 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1426 spin_unlock_bh(&bp->phy_lock);
1427 msleep(20);
1428 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1429 }
1430
ca58c3af
MC
1431 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1432 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1433 BMCR_ANENABLE);
f8dd064e
MC
1434 /* Speed up link-up time when the link partner
1435 * does not autonegotiate which is very common
1436 * in blade servers. Some blade servers use
1437 * IPMI for kerboard input and it's important
1438 * to minimize link disruptions. Autoneg. involves
1439 * exchanging base pages plus 3 next pages and
1440 * normally completes in about 120 msec.
1441 */
1442 bp->current_interval = SERDES_AN_TIMEOUT;
1443 bp->serdes_an_pending = 1;
1444 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1445 } else {
1446 bnx2_resolve_flow_ctrl(bp);
1447 bnx2_set_mac_link(bp);
b6016b76
MC
1448 }
1449
1450 return 0;
1451}
1452
/* Advertised fibre speeds: include 2.5G only when the PHY supports it.
 * NOTE: expands in terms of a local `bp` — only valid inside functions
 * with a struct bnx2 *bp in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* Full set of copper speeds advertised by default. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement masks for 10/100 and 1000 Mb/s capability bits. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
0d8a6571
MC
1468static void
1469bnx2_set_default_remote_link(struct bnx2 *bp)
1470{
1471 u32 link;
1472
1473 if (bp->phy_port == PORT_TP)
1474 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1475 else
1476 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1477
1478 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1479 bp->req_line_speed = 0;
1480 bp->autoneg |= AUTONEG_SPEED;
1481 bp->advertising = ADVERTISED_Autoneg;
1482 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1483 bp->advertising |= ADVERTISED_10baseT_Half;
1484 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1485 bp->advertising |= ADVERTISED_10baseT_Full;
1486 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1487 bp->advertising |= ADVERTISED_100baseT_Half;
1488 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1489 bp->advertising |= ADVERTISED_100baseT_Full;
1490 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1491 bp->advertising |= ADVERTISED_1000baseT_Full;
1492 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1493 bp->advertising |= ADVERTISED_2500baseX_Full;
1494 } else {
1495 bp->autoneg = 0;
1496 bp->advertising = 0;
1497 bp->req_duplex = DUPLEX_FULL;
1498 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1499 bp->req_line_speed = SPEED_10;
1500 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1501 bp->req_duplex = DUPLEX_HALF;
1502 }
1503 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1504 bp->req_line_speed = SPEED_100;
1505 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1506 bp->req_duplex = DUPLEX_HALF;
1507 }
1508 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1509 bp->req_line_speed = SPEED_1000;
1510 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1511 bp->req_line_speed = SPEED_2500;
1512 }
1513}
1514
deaf391b
MC
1515static void
1516bnx2_set_default_link(struct bnx2 *bp)
1517{
0d8a6571
MC
1518 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1519 return bnx2_set_default_remote_link(bp);
1520
deaf391b
MC
1521 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1522 bp->req_line_speed = 0;
1523 if (bp->phy_flags & PHY_SERDES_FLAG) {
1524 u32 reg;
1525
1526 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1527
1528 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1529 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1530 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1531 bp->autoneg = 0;
1532 bp->req_line_speed = bp->line_speed = SPEED_1000;
1533 bp->req_duplex = DUPLEX_FULL;
1534 }
1535 } else
1536 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1537}
1538
df149d70
MC
1539static void
1540bnx2_send_heart_beat(struct bnx2 *bp)
1541{
1542 u32 msg;
1543 u32 addr;
1544
1545 spin_lock(&bp->indirect_lock);
1546 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1547 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1548 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1549 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1550 spin_unlock(&bp->indirect_lock);
1551}
1552
0d8a6571
MC
1553static void
1554bnx2_remote_phy_event(struct bnx2 *bp)
1555{
1556 u32 msg;
1557 u8 link_up = bp->link_up;
1558 u8 old_port;
1559
1560 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1561
df149d70
MC
1562 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1563 bnx2_send_heart_beat(bp);
1564
1565 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1566
0d8a6571
MC
1567 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1568 bp->link_up = 0;
1569 else {
1570 u32 speed;
1571
1572 bp->link_up = 1;
1573 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1574 bp->duplex = DUPLEX_FULL;
1575 switch (speed) {
1576 case BNX2_LINK_STATUS_10HALF:
1577 bp->duplex = DUPLEX_HALF;
1578 case BNX2_LINK_STATUS_10FULL:
1579 bp->line_speed = SPEED_10;
1580 break;
1581 case BNX2_LINK_STATUS_100HALF:
1582 bp->duplex = DUPLEX_HALF;
1583 case BNX2_LINK_STATUS_100BASE_T4:
1584 case BNX2_LINK_STATUS_100FULL:
1585 bp->line_speed = SPEED_100;
1586 break;
1587 case BNX2_LINK_STATUS_1000HALF:
1588 bp->duplex = DUPLEX_HALF;
1589 case BNX2_LINK_STATUS_1000FULL:
1590 bp->line_speed = SPEED_1000;
1591 break;
1592 case BNX2_LINK_STATUS_2500HALF:
1593 bp->duplex = DUPLEX_HALF;
1594 case BNX2_LINK_STATUS_2500FULL:
1595 bp->line_speed = SPEED_2500;
1596 break;
1597 default:
1598 bp->line_speed = 0;
1599 break;
1600 }
1601
1602 spin_lock(&bp->phy_lock);
1603 bp->flow_ctrl = 0;
1604 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1605 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1606 if (bp->duplex == DUPLEX_FULL)
1607 bp->flow_ctrl = bp->req_flow_ctrl;
1608 } else {
1609 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1610 bp->flow_ctrl |= FLOW_CTRL_TX;
1611 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1612 bp->flow_ctrl |= FLOW_CTRL_RX;
1613 }
1614
1615 old_port = bp->phy_port;
1616 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1617 bp->phy_port = PORT_FIBRE;
1618 else
1619 bp->phy_port = PORT_TP;
1620
1621 if (old_port != bp->phy_port)
1622 bnx2_set_default_link(bp);
1623
1624 spin_unlock(&bp->phy_lock);
1625 }
1626 if (bp->link_up != link_up)
1627 bnx2_report_link(bp);
1628
1629 bnx2_set_mac_link(bp);
1630}
1631
1632static int
1633bnx2_set_remote_link(struct bnx2 *bp)
1634{
1635 u32 evt_code;
1636
1637 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1638 switch (evt_code) {
1639 case BNX2_FW_EVT_CODE_LINK_EVENT:
1640 bnx2_remote_phy_event(bp);
1641 break;
1642 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1643 default:
df149d70 1644 bnx2_send_heart_beat(bp);
0d8a6571
MC
1645 break;
1646 }
1647 return 0;
1648}
1649
b6016b76
MC
1650static int
1651bnx2_setup_copper_phy(struct bnx2 *bp)
1652{
1653 u32 bmcr;
1654 u32 new_bmcr;
1655
ca58c3af 1656 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1657
1658 if (bp->autoneg & AUTONEG_SPEED) {
1659 u32 adv_reg, adv1000_reg;
1660 u32 new_adv_reg = 0;
1661 u32 new_adv1000_reg = 0;
1662
ca58c3af 1663 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
b6016b76
MC
1664 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1665 ADVERTISE_PAUSE_ASYM);
1666
1667 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1668 adv1000_reg &= PHY_ALL_1000_SPEED;
1669
1670 if (bp->advertising & ADVERTISED_10baseT_Half)
1671 new_adv_reg |= ADVERTISE_10HALF;
1672 if (bp->advertising & ADVERTISED_10baseT_Full)
1673 new_adv_reg |= ADVERTISE_10FULL;
1674 if (bp->advertising & ADVERTISED_100baseT_Half)
1675 new_adv_reg |= ADVERTISE_100HALF;
1676 if (bp->advertising & ADVERTISED_100baseT_Full)
1677 new_adv_reg |= ADVERTISE_100FULL;
1678 if (bp->advertising & ADVERTISED_1000baseT_Full)
1679 new_adv1000_reg |= ADVERTISE_1000FULL;
6aa20a22 1680
b6016b76
MC
1681 new_adv_reg |= ADVERTISE_CSMA;
1682
1683 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1684
1685 if ((adv1000_reg != new_adv1000_reg) ||
1686 (adv_reg != new_adv_reg) ||
1687 ((bmcr & BMCR_ANENABLE) == 0)) {
1688
ca58c3af 1689 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
b6016b76 1690 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
ca58c3af 1691 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
1692 BMCR_ANENABLE);
1693 }
1694 else if (bp->link_up) {
1695 /* Flow ctrl may have changed from auto to forced */
1696 /* or vice-versa. */
1697
1698 bnx2_resolve_flow_ctrl(bp);
1699 bnx2_set_mac_link(bp);
1700 }
1701 return 0;
1702 }
1703
1704 new_bmcr = 0;
1705 if (bp->req_line_speed == SPEED_100) {
1706 new_bmcr |= BMCR_SPEED100;
1707 }
1708 if (bp->req_duplex == DUPLEX_FULL) {
1709 new_bmcr |= BMCR_FULLDPLX;
1710 }
1711 if (new_bmcr != bmcr) {
1712 u32 bmsr;
b6016b76 1713
ca58c3af
MC
1714 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1715 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 1716
b6016b76
MC
1717 if (bmsr & BMSR_LSTATUS) {
1718 /* Force link down */
ca58c3af 1719 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
1720 spin_unlock_bh(&bp->phy_lock);
1721 msleep(50);
1722 spin_lock_bh(&bp->phy_lock);
1723
ca58c3af
MC
1724 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1725 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
1726 }
1727
ca58c3af 1728 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
1729
1730 /* Normally, the new speed is setup after the link has
1731 * gone down and up again. In some cases, link will not go
1732 * down so we need to set up the new speed here.
1733 */
1734 if (bmsr & BMSR_LSTATUS) {
1735 bp->line_speed = bp->req_line_speed;
1736 bp->duplex = bp->req_duplex;
1737 bnx2_resolve_flow_ctrl(bp);
1738 bnx2_set_mac_link(bp);
1739 }
27a005b8
MC
1740 } else {
1741 bnx2_resolve_flow_ctrl(bp);
1742 bnx2_set_mac_link(bp);
b6016b76
MC
1743 }
1744 return 0;
1745}
1746
1747static int
0d8a6571 1748bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1749{
1750 if (bp->loopback == MAC_LOOPBACK)
1751 return 0;
1752
1753 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1754 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1755 }
1756 else {
1757 return (bnx2_setup_copper_phy(bp));
1758 }
1759}
1760
27a005b8
MC
1761static int
1762bnx2_init_5709s_phy(struct bnx2 *bp)
1763{
1764 u32 val;
1765
1766 bp->mii_bmcr = MII_BMCR + 0x10;
1767 bp->mii_bmsr = MII_BMSR + 0x10;
1768 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1769 bp->mii_adv = MII_ADVERTISE + 0x10;
1770 bp->mii_lpa = MII_LPA + 0x10;
1771 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1772
1773 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1774 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1775
1776 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1777 bnx2_reset_phy(bp);
1778
1779 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1780
1781 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1782 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1783 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1784 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1785
1786 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1787 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1788 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1789 val |= BCM5708S_UP1_2G5;
1790 else
1791 val &= ~BCM5708S_UP1_2G5;
1792 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1793
1794 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1795 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1796 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1797 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1798
1799 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1800
1801 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1802 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1803 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1804
1805 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1806
1807 return 0;
1808}
1809
b6016b76 1810static int
5b0c76ad
MC
1811bnx2_init_5708s_phy(struct bnx2 *bp)
1812{
1813 u32 val;
1814
27a005b8
MC
1815 bnx2_reset_phy(bp);
1816
1817 bp->mii_up1 = BCM5708S_UP1;
1818
5b0c76ad
MC
1819 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1820 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1821 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1822
1823 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1824 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1825 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1826
1827 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1828 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1829 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1830
1831 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1832 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1833 val |= BCM5708S_UP1_2G5;
1834 bnx2_write_phy(bp, BCM5708S_UP1, val);
1835 }
1836
1837 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
1838 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1839 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
1840 /* increase tx signal amplitude */
1841 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1842 BCM5708S_BLK_ADDR_TX_MISC);
1843 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1844 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1845 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1846 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1847 }
1848
e3648b3d 1849 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
1850 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1851
1852 if (val) {
1853 u32 is_backplane;
1854
e3648b3d 1855 is_backplane = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
1856 BNX2_SHARED_HW_CFG_CONFIG);
1857 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1858 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1859 BCM5708S_BLK_ADDR_TX_MISC);
1860 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1861 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1862 BCM5708S_BLK_ADDR_DIG);
1863 }
1864 }
1865 return 0;
1866}
1867
1868static int
1869bnx2_init_5706s_phy(struct bnx2 *bp)
b6016b76 1870{
27a005b8
MC
1871 bnx2_reset_phy(bp);
1872
b6016b76
MC
1873 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1874
59b47d8a
MC
1875 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1876 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
1877
1878 if (bp->dev->mtu > 1500) {
1879 u32 val;
1880
1881 /* Set extended packet length bit */
1882 bnx2_write_phy(bp, 0x18, 0x7);
1883 bnx2_read_phy(bp, 0x18, &val);
1884 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1885
1886 bnx2_write_phy(bp, 0x1c, 0x6c00);
1887 bnx2_read_phy(bp, 0x1c, &val);
1888 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1889 }
1890 else {
1891 u32 val;
1892
1893 bnx2_write_phy(bp, 0x18, 0x7);
1894 bnx2_read_phy(bp, 0x18, &val);
1895 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1896
1897 bnx2_write_phy(bp, 0x1c, 0x6c00);
1898 bnx2_read_phy(bp, 0x1c, &val);
1899 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1900 }
1901
1902 return 0;
1903}
1904
1905static int
1906bnx2_init_copper_phy(struct bnx2 *bp)
1907{
5b0c76ad
MC
1908 u32 val;
1909
27a005b8
MC
1910 bnx2_reset_phy(bp);
1911
b6016b76
MC
1912 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1913 bnx2_write_phy(bp, 0x18, 0x0c00);
1914 bnx2_write_phy(bp, 0x17, 0x000a);
1915 bnx2_write_phy(bp, 0x15, 0x310b);
1916 bnx2_write_phy(bp, 0x17, 0x201f);
1917 bnx2_write_phy(bp, 0x15, 0x9506);
1918 bnx2_write_phy(bp, 0x17, 0x401f);
1919 bnx2_write_phy(bp, 0x15, 0x14e2);
1920 bnx2_write_phy(bp, 0x18, 0x0400);
1921 }
1922
b659f44e
MC
1923 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1924 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1925 MII_BNX2_DSP_EXPAND_REG | 0x8);
1926 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1927 val &= ~(1 << 8);
1928 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1929 }
1930
b6016b76 1931 if (bp->dev->mtu > 1500) {
b6016b76
MC
1932 /* Set extended packet length bit */
1933 bnx2_write_phy(bp, 0x18, 0x7);
1934 bnx2_read_phy(bp, 0x18, &val);
1935 bnx2_write_phy(bp, 0x18, val | 0x4000);
1936
1937 bnx2_read_phy(bp, 0x10, &val);
1938 bnx2_write_phy(bp, 0x10, val | 0x1);
1939 }
1940 else {
b6016b76
MC
1941 bnx2_write_phy(bp, 0x18, 0x7);
1942 bnx2_read_phy(bp, 0x18, &val);
1943 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1944
1945 bnx2_read_phy(bp, 0x10, &val);
1946 bnx2_write_phy(bp, 0x10, val & ~0x1);
1947 }
1948
5b0c76ad
MC
1949 /* ethernet@wirespeed */
1950 bnx2_write_phy(bp, 0x18, 0x7007);
1951 bnx2_read_phy(bp, 0x18, &val);
1952 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
1953 return 0;
1954}
1955
1956
1957static int
1958bnx2_init_phy(struct bnx2 *bp)
1959{
1960 u32 val;
1961 int rc = 0;
1962
1963 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1964 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1965
ca58c3af
MC
1966 bp->mii_bmcr = MII_BMCR;
1967 bp->mii_bmsr = MII_BMSR;
27a005b8 1968 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
1969 bp->mii_adv = MII_ADVERTISE;
1970 bp->mii_lpa = MII_LPA;
1971
b6016b76
MC
1972 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1973
0d8a6571
MC
1974 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1975 goto setup_phy;
1976
b6016b76
MC
1977 bnx2_read_phy(bp, MII_PHYSID1, &val);
1978 bp->phy_id = val << 16;
1979 bnx2_read_phy(bp, MII_PHYSID2, &val);
1980 bp->phy_id |= val & 0xffff;
1981
1982 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1983 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1984 rc = bnx2_init_5706s_phy(bp);
1985 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1986 rc = bnx2_init_5708s_phy(bp);
27a005b8
MC
1987 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1988 rc = bnx2_init_5709s_phy(bp);
b6016b76
MC
1989 }
1990 else {
1991 rc = bnx2_init_copper_phy(bp);
1992 }
1993
0d8a6571
MC
1994setup_phy:
1995 if (!rc)
1996 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
1997
1998 return rc;
1999}
2000
2001static int
2002bnx2_set_mac_loopback(struct bnx2 *bp)
2003{
2004 u32 mac_mode;
2005
2006 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2007 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2008 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2009 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2010 bp->link_up = 1;
2011 return 0;
2012}
2013
bc5a0690
MC
2014static int bnx2_test_link(struct bnx2 *);
2015
2016static int
2017bnx2_set_phy_loopback(struct bnx2 *bp)
2018{
2019 u32 mac_mode;
2020 int rc, i;
2021
2022 spin_lock_bh(&bp->phy_lock);
ca58c3af 2023 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
2024 BMCR_SPEED1000);
2025 spin_unlock_bh(&bp->phy_lock);
2026 if (rc)
2027 return rc;
2028
2029 for (i = 0; i < 10; i++) {
2030 if (bnx2_test_link(bp) == 0)
2031 break;
80be4434 2032 msleep(100);
bc5a0690
MC
2033 }
2034
2035 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2036 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2037 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 2038 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
2039
2040 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2041 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2042 bp->link_up = 1;
2043 return 0;
2044}
2045
b6016b76 2046static int
b090ae2b 2047bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
b6016b76
MC
2048{
2049 int i;
2050 u32 val;
2051
b6016b76
MC
2052 bp->fw_wr_seq++;
2053 msg_data |= bp->fw_wr_seq;
2054
e3648b3d 2055 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76
MC
2056
2057 /* wait for an acknowledgement. */
b090ae2b
MC
2058 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2059 msleep(10);
b6016b76 2060
e3648b3d 2061 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
b6016b76
MC
2062
2063 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2064 break;
2065 }
b090ae2b
MC
2066 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2067 return 0;
b6016b76
MC
2068
2069 /* If we timed out, inform the firmware that this is the case. */
b090ae2b
MC
2070 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2071 if (!silent)
2072 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2073 "%x\n", msg_data);
b6016b76
MC
2074
2075 msg_data &= ~BNX2_DRV_MSG_CODE;
2076 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2077
e3648b3d 2078 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76 2079
b6016b76
MC
2080 return -EBUSY;
2081 }
2082
b090ae2b
MC
2083 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2084 return -EIO;
2085
b6016b76
MC
2086 return 0;
2087}
2088
/* Enable and initialize the 5709 on-chip context memory, then program the
 * host page table with the DMA addresses of the pre-allocated context
 * blocks (bp->ctx_blk_mapping[]).  Returns 0 on success or -EBUSY if the
 * chip does not acknowledge the memory init or a page-table write in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the chip to clear MEM_INIT (up to ~20us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Write one page-table entry per context block: low 32 bits plus
	 * VALID in DATA0, high 32 bits in DATA1, then trigger the write
	 * with the entry index in CTRL.
	 */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for WRITE_REQ to self-clear (up to ~50us). */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2131
/* Zero-initialize the 96 on-chip connection contexts (pre-5709 chips).
 * On 5706 A0, some physical CIDs are remapped before zeroing to work
 * around a silicon issue.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				/* Remap CIDs with bit 3 set to the 0x60+ range. */
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* NOTE(review): the += (i << PHY_CTX_SHIFT) accumulates across
		 * iterations; this is only correct while the loop runs at most
		 * twice (i adds 0 then 1 page) — confirm CTX_SIZE/PHY_CTX_SIZE <= 2.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2174
/* Work around bad on-chip RX buffer memory: allocate every free mbuf from
 * the firmware pool, remember the good ones (bit 9 clear), and free only
 * those back — permanently retiring the bad blocks.  Returns 0 on success
 * or -ENOMEM if the temporary tracking array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free command as expected by the firmware. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2225
2226static void
6aa20a22 2227bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2228{
2229 u32 val;
2230 u8 *mac_addr = bp->dev->dev_addr;
2231
2232 val = (mac_addr[0] << 8) | mac_addr[1];
2233
2234 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2235
6aa20a22 2236 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2237 (mac_addr[4] << 8) | mac_addr[5];
2238
2239 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2240}
2241
/* Allocate one page for the RX page ring at @index, DMA-map it, and fill
 * in the matching rx_bd with the mapping address.  Returns 0 or -ENOMEM.
 * GFP_ATOMIC because this runs from the NAPI/RX refill path.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	/* Remember the page and its DMA address for later unmap/free. */
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2261
2262static void
2263bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2264{
2265 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2266 struct page *page = rx_pg->page;
2267
2268 if (!page)
2269 return;
2270
2271 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2272 PCI_DMA_FROMDEVICE);
2273
2274 __free_page(page);
2275 rx_pg->page = NULL;
2276}
2277
/* Allocate and DMA-map a new receive skb for RX ring slot @index, fill in
 * the matching rx_bd, and advance the producer byte-sequence counter.
 * Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Tell the chip how many more bytes of buffer were posted. */
	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2308
da3e4fbe 2309static int
35efa7c1 2310bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2311{
35efa7c1 2312 struct status_block *sblk = bnapi->status_blk;
b6016b76 2313 u32 new_link_state, old_link_state;
da3e4fbe 2314 int is_set = 1;
b6016b76 2315
da3e4fbe
MC
2316 new_link_state = sblk->status_attn_bits & event;
2317 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2318 if (new_link_state != old_link_state) {
da3e4fbe
MC
2319 if (new_link_state)
2320 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2321 else
2322 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2323 } else
2324 is_set = 0;
2325
2326 return is_set;
2327}
2328
/* Handle PHY-related status-block attention events: link state changes
 * (under phy_lock) and remote-PHY timer aborts.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2341
ead7270b 2342static inline u16
35efa7c1 2343bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2344{
2345 u16 cons;
2346
35efa7c1 2347 cons = bnapi->status_blk->status_tx_quick_consumer_index0;
ead7270b
MC
2348
2349 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2350 cons++;
2351 return cons;
2352}
2353
/* Reclaim completed TX descriptors: walk from the software consumer to
 * the hardware consumer, unmap and free each transmitted skb (head plus
 * fragments), then wake the queue if it was stopped and enough room has
 * opened up.
 */
static void
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim once ALL BDs of the TSO packet have
			 * completed; otherwise stop and retry next time.
			 */
			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment BD that follows the head BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read in case the chip completed more while we worked. */
		hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	bp->hw_tx_cons = hw_cons;
	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the tx lock to avoid racing with xmit. */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2433
/* Recycle @count consumed page-ring entries back to the producer side
 * instead of allocating fresh pages.  If @skb is non-NULL, its last
 * fragment page is first detached, remapped, and returned to the cons
 * slot (the skb is then freed).  Updates bp->rx_pg_prod/rx_pg_cons.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bp->rx_pg_prod, prod;
	u16 cons = bp->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Strip the skb's last page frag and put it back
			 * into the cons slot so it can be reused below.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move page ownership and DMA mapping from the cons
			 * slot to the prod slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bp->rx_pg_prod = hw_prod;
	bp->rx_pg_cons = cons;
}
2482
/* Recycle an RX skb from ring slot @cons to producer slot @prod without
 * reallocating: sync the buffer for device, move the skb and its DMA
 * mapping/descriptor address to the prod slot, and advance the producer
 * byte-sequence counter.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: skb is back in place, nothing else to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2512
/* Finish receiving one packet into @skb.  First replenish the RX slot
 * (recycling everything on allocation failure).  For non-split packets
 * (@hdr_len == 0) the data is entirely in the skb head.  Otherwise the
 * header is in the head and the payload is gathered from page-ring
 * entries as skb frags; the trailing 4-byte CRC is trimmed.  @ring_idx
 * packs the consumer slot in the high 16 bits and producer in the low.
 * Returns 0 on success or a negative errno (packet recycled).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			/* Also recycle the page-ring entries this packet used. */
			bnx2_reuse_rx_skb_pages(bp, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bp->rx_pg_cons;
		u16 pg_prod = bp->rx_pg_prod;

		/* frag_size includes the 4-byte CRC; it is trimmed from the
		 * last fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Final fragment is all (or part of) CRC:
				 * recycle the remaining pages and trim the
				 * excess already accounted in the skb.
				 */
				unsigned int tail = 4 - frag_len;

				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Trim the CRC from the last fragment. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				/* skb still owns pages consumed so far; give
				 * back its last page and the unused entries.
				 */
				bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bp->rx_pg_prod = pg_prod;
		bp->rx_pg_cons = pg_cons;
	}
	return 0;
}
2601
c09c2627 2602static inline u16
35efa7c1 2603bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 2604{
35efa7c1 2605 u16 cons = bnapi->status_blk->status_rx_quick_consumer_index0;
c09c2627
MC
2606
2607 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2608 cons++;
2609 return cons;
2610}
2611
/* NAPI RX handler: process up to @budget received packets.  For each
 * packet: validate the l2_fhdr status, copy small packets into a fresh
 * skb (recycling the ring buffer), hand large ones to bnx2_rx_skb(),
 * set checksum/VLAN info, and pass the skb up the stack.  Finally the
 * new producer indices are written back to the chip.  Returns the
 * number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header for the CPU; the rest is
		 * either copied after a full unmap or attached as pages.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		/* Drop frames with any hardware-reported receive error. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the trailing 4-byte CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes straight back on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
			   (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bp->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2756
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2780
/* One-shot MSI ISR: the hardware auto-masks the interrupt, so unlike
 * bnx2_msi() no explicit mask write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;

	prefetch(bnapi->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bnapi->napi);

	return IRQ_HANDLED;
}
2798
/* INTx (shared line) ISR: detect whether this device raised the
 * interrupt, mask further interrupts, and schedule NAPI polling.
 * Returns IRQ_NONE if the interrupt was not ours.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi;
	struct status_block *sblk = bnapi->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (netif_rx_schedule_prep(dev, &bnapi->napi)) {
		/* Record the status index we are servicing before polling. */
		bnapi->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bnapi->napi);
	}

	return IRQ_HANDLED;
}
2838
0d8a6571
MC
2839#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2840 STATUS_ATTN_BITS_TIMER_ABORT)
da3e4fbe 2841
f4e418f7 2842static inline int
35efa7c1 2843bnx2_has_work(struct bnx2_napi *bnapi)
f4e418f7 2844{
35efa7c1 2845 struct bnx2 *bp = bnapi->bp;
f4e418f7
MC
2846 struct status_block *sblk = bp->status_blk;
2847
35efa7c1
MC
2848 if ((bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons) ||
2849 (bnx2_get_hw_tx_cons(bnapi) != bp->hw_tx_cons))
f4e418f7
MC
2850 return 1;
2851
da3e4fbe
MC
2852 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2853 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
2854 return 1;
2855
2856 return 0;
2857}
2858
/* One NAPI poll pass: handle pending attention (PHY/link) events, reap
 * completed TX descriptors, and process RX packets up to the remaining
 * budget.  Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct status_block *sblk = bnapi->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bnx2_get_hw_tx_cons(bnapi) != bp->hw_tx_cons)
		bnx2_tx_int(bp, bnapi);

	if (bnx2_get_hw_rx_cons(bnapi) != bp->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
2887
/* NAPI poll entry point.  Loop doing work passes until the budget is
 * exhausted or no work remains; on completion, re-enable interrupts and
 * tell the chip the last status index serviced.  Returns packets done.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			netif_rx_complete(bp->dev, napi);
			if (likely(bp->flags & USING_MSI_FLAG)) {
				/* MSI: a single unmask-and-ack write suffices. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with interrupts still masked, then
			 * unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
2929
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the receive filtering mode: promiscuous, all-multicast, or a
 * multicast hash built from dev->mc_list, plus VLAN tag handling based
 * on vlgrp/ASF state.  Writes the EMAC RX mode and RPM sort registers.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in the frame only when no VLAN group is
	 * registered and ASF firmware does not need them stripped.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept every multicast: set all hash bits. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address's CRC into one bit of the 256-bit filter. */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort engine. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3004
/* Load RV2P processor firmware: write the 64-bit instructions pairwise
 * into INSTR_HIGH/INSTR_LOW and commit each to the selected processor
 * (PROC1 or PROC2) via its ADDR_CMD register, then reset the processor
 * (it is un-stalled later during chip init).
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	/* Each instruction is 8 bytes: two 32-bit words per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3037
/* Load firmware @fw into the on-chip CPU described by @cpu_reg: halt the
 * CPU, write the text (decompressed from gzip if needed), data, rodata
 * sections and zero sbss/bss into its scratchpad, set the start PC, and
 * restart the CPU.  Returns 0 on success or a negative zlib error code.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* Decompress the gzipped text into fw->text first. */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
3119
/* Load and start all on-chip processors: the two RV2P (receive
 * virtual-to-physical) engines plus the RX, TX, TX patch-up, completion
 * and command CPUs.  The RV2P images are stored zlib-compressed and are
 * inflated into a shared scratch buffer; the 5709 (Xinan) uses its own
 * firmware images, all other chips use the 06 images.
 *
 * Returns 0 on success, -ENOMEM if the scratch buffer cannot be
 * allocated, or a negative error from decompression / CPU load.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc, rv2p_len;
	void *text, *rv2p;

	/* Scratch buffer reused for every decompressed image below. */
	text = vmalloc(FW_BUF_SIZE);
	if (!text)
		return -ENOMEM;
	/* Initialize the RV2P processor (engine 1). */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc1;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
	} else {
		rv2p = bnx2_rv2p_proc1;
		rv2p_len = sizeof(bnx2_rv2p_proc1);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	/* On success zlib_inflate_blob() returns the inflated length. */
	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);

	/* RV2P engine 2. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rv2p = bnx2_xi_rv2p_proc2;
		rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
	} else {
		rv2p = bnx2_rv2p_proc2;
		rv2p_len = sizeof(bnx2_rv2p_proc2);
	}
	rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
	if (rc < 0)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	/* load_cpu_fw() inflates fw's compressed sections into fw->text. */
	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_cp_fw_09;
	else
		fw = &bnx2_cp_fw_06;

	fw->text = text;
	rc = load_cpu_fw(bp, &cpu_reg, fw);
	/* Last load: rc falls straight through to the cleanup below. */

init_cpu_err:
	vfree(text);
	return rc;
}
3280
/* Move the device between PCI power states.
 *
 * PCI_D0: wake the chip (clearing any D3hot residue in PMCSR, with the
 * spec-mandated delay when leaving D3hot) and disable the magic-packet /
 * ACPI wake machinery.
 *
 * PCI_D3hot: optionally arm Wake-on-LAN (force 10/100 autoneg on copper,
 * program the MAC for magic-packet reception, accept broadcast/multicast,
 * keep EMAC+RPM powered), tell the firmware we are suspending, then write
 * the D3hot state into PMCSR.  No register access is allowed after that
 * write until the device returns to D0.
 *
 * Returns 0 on success or -EINVAL for an unsupported target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state field (back to D0) and ack any pending
		 * PME status (write-1-to-clear).
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear wake-packet latches and disable magic-packet mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper for
			 * the low-power link, restoring the user settings
			 * afterwards.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode and magic-packet reception. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware whether we are suspending with WOL. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		/* Select D3hot (0x3) in the PM state field.  On 5706 A0/A1
		 * the state is only written when WOL is armed -- presumably
		 * a chip-rev workaround; confirm against errata.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3417
3418static int
3419bnx2_acquire_nvram_lock(struct bnx2 *bp)
3420{
3421 u32 val;
3422 int j;
3423
3424 /* Request access to the flash interface. */
3425 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3426 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3427 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3428 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3429 break;
3430
3431 udelay(5);
3432 }
3433
3434 if (j >= NVRAM_TIMEOUT_COUNT)
3435 return -EBUSY;
3436
3437 return 0;
3438}
3439
3440static int
3441bnx2_release_nvram_lock(struct bnx2 *bp)
3442{
3443 int j;
3444 u32 val;
3445
3446 /* Relinquish nvram interface. */
3447 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3448
3449 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3450 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3451 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3452 break;
3453
3454 udelay(5);
3455 }
3456
3457 if (j >= NVRAM_TIMEOUT_COUNT)
3458 return -EBUSY;
3459
3460 return 0;
3461}
3462
3463
3464static int
3465bnx2_enable_nvram_write(struct bnx2 *bp)
3466{
3467 u32 val;
3468
3469 val = REG_RD(bp, BNX2_MISC_CFG);
3470 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3471
e30372c9 3472 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
3473 int j;
3474
3475 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3476 REG_WR(bp, BNX2_NVM_COMMAND,
3477 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3478
3479 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3480 udelay(5);
3481
3482 val = REG_RD(bp, BNX2_NVM_COMMAND);
3483 if (val & BNX2_NVM_COMMAND_DONE)
3484 break;
3485 }
3486
3487 if (j >= NVRAM_TIMEOUT_COUNT)
3488 return -EBUSY;
3489 }
3490 return 0;
3491}
3492
3493static void
3494bnx2_disable_nvram_write(struct bnx2 *bp)
3495{
3496 u32 val;
3497
3498 val = REG_RD(bp, BNX2_MISC_CFG);
3499 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3500}
3501
3502
3503static void
3504bnx2_enable_nvram_access(struct bnx2 *bp)
3505{
3506 u32 val;
3507
3508 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3509 /* Enable both bits, even on read. */
6aa20a22 3510 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3511 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3512}
3513
3514static void
3515bnx2_disable_nvram_access(struct bnx2 *bp)
3516{
3517 u32 val;
3518
3519 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3520 /* Disable both bits, even after read. */
6aa20a22 3521 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3522 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3523 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3524}
3525
3526static int
3527bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3528{
3529 u32 cmd;
3530 int j;
3531
e30372c9 3532 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
3533 /* Buffered flash, no erase needed */
3534 return 0;
3535
3536 /* Build an erase command */
3537 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3538 BNX2_NVM_COMMAND_DOIT;
3539
3540 /* Need to clear DONE bit separately. */
3541 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3542
3543 /* Address of the NVRAM to read from. */
3544 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3545
3546 /* Issue an erase command. */
3547 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3548
3549 /* Wait for completion. */
3550 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3551 u32 val;
3552
3553 udelay(5);
3554
3555 val = REG_RD(bp, BNX2_NVM_COMMAND);
3556 if (val & BNX2_NVM_COMMAND_DONE)
3557 break;
3558 }
3559
3560 if (j >= NVRAM_TIMEOUT_COUNT)
3561 return -EBUSY;
3562
3563 return 0;
3564}
3565
3566static int
3567bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3568{
3569 u32 cmd;
3570 int j;
3571
3572 /* Build the command word. */
3573 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3574
e30372c9
MC
3575 /* Calculate an offset of a buffered flash, not needed for 5709. */
3576 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
3577 offset = ((offset / bp->flash_info->page_size) <<
3578 bp->flash_info->page_bits) +
3579 (offset % bp->flash_info->page_size);
3580 }
3581
3582 /* Need to clear DONE bit separately. */
3583 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3584
3585 /* Address of the NVRAM to read from. */
3586 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3587
3588 /* Issue a read command. */
3589 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3590
3591 /* Wait for completion. */
3592 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3593 u32 val;
3594
3595 udelay(5);
3596
3597 val = REG_RD(bp, BNX2_NVM_COMMAND);
3598 if (val & BNX2_NVM_COMMAND_DONE) {
3599 val = REG_RD(bp, BNX2_NVM_READ);
3600
3601 val = be32_to_cpu(val);
3602 memcpy(ret_val, &val, 4);
3603 break;
3604 }
3605 }
3606 if (j >= NVRAM_TIMEOUT_COUNT)
3607 return -EBUSY;
3608
3609 return 0;
3610}
3611
3612
3613static int
3614bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3615{
3616 u32 cmd, val32;
3617 int j;
3618
3619 /* Build the command word. */
3620 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3621
e30372c9
MC
3622 /* Calculate an offset of a buffered flash, not needed for 5709. */
3623 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
3624 offset = ((offset / bp->flash_info->page_size) <<
3625 bp->flash_info->page_bits) +
3626 (offset % bp->flash_info->page_size);
3627 }
3628
3629 /* Need to clear DONE bit separately. */
3630 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3631
3632 memcpy(&val32, val, 4);
3633 val32 = cpu_to_be32(val32);
3634
3635 /* Write the data. */
3636 REG_WR(bp, BNX2_NVM_WRITE, val32);
3637
3638 /* Address of the NVRAM to write to. */
3639 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3640
3641 /* Issue the write command. */
3642 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3643
3644 /* Wait for completion. */
3645 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3646 udelay(5);
3647
3648 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3649 break;
3650 }
3651 if (j >= NVRAM_TIMEOUT_COUNT)
3652 return -EBUSY;
3653
3654 return 0;
3655}
3656
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * The 5709 has a single known part (flash_5709).  Other chips are
 * matched against flash_table using the strap bits in NVM_CFG1; if the
 * interface has not yet been reconfigured, the matching entry's config
 * registers are programmed into the chip.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or an error
 * from the NVRAM lock acquisition.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* Bit 30 set => flash interface has already been reconfigured
	 * (presumably by the boot code) -- TODO confirm bit meaning
	 * against the register spec.
	 */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* No table entry matched the straps. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared hardware config; fall back
	 * to the table's total size when the field is zero.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3739
3740static int
3741bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3742 int buf_size)
3743{
3744 int rc = 0;
3745 u32 cmd_flags, offset32, len32, extra;
3746
3747 if (buf_size == 0)
3748 return 0;
3749
3750 /* Request access to the flash interface. */
3751 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3752 return rc;
3753
3754 /* Enable access to flash interface */
3755 bnx2_enable_nvram_access(bp);
3756
3757 len32 = buf_size;
3758 offset32 = offset;
3759 extra = 0;
3760
3761 cmd_flags = 0;
3762
3763 if (offset32 & 3) {
3764 u8 buf[4];
3765 u32 pre_len;
3766
3767 offset32 &= ~3;
3768 pre_len = 4 - (offset & 3);
3769
3770 if (pre_len >= len32) {
3771 pre_len = len32;
3772 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3773 BNX2_NVM_COMMAND_LAST;
3774 }
3775 else {
3776 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3777 }
3778
3779 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3780
3781 if (rc)
3782 return rc;
3783
3784 memcpy(ret_buf, buf + (offset & 3), pre_len);
3785
3786 offset32 += 4;
3787 ret_buf += pre_len;
3788 len32 -= pre_len;
3789 }
3790 if (len32 & 3) {
3791 extra = 4 - (len32 & 3);
3792 len32 = (len32 + 4) & ~3;
3793 }
3794
3795 if (len32 == 4) {
3796 u8 buf[4];
3797
3798 if (cmd_flags)
3799 cmd_flags = BNX2_NVM_COMMAND_LAST;
3800 else
3801 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3802 BNX2_NVM_COMMAND_LAST;
3803
3804 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3805
3806 memcpy(ret_buf, buf, 4 - extra);
3807 }
3808 else if (len32 > 0) {
3809 u8 buf[4];
3810
3811 /* Read the first word. */
3812 if (cmd_flags)
3813 cmd_flags = 0;
3814 else
3815 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3816
3817 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3818
3819 /* Advance to the next dword. */
3820 offset32 += 4;
3821 ret_buf += 4;
3822 len32 -= 4;
3823
3824 while (len32 > 4 && rc == 0) {
3825 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3826
3827 /* Advance to the next dword. */
3828 offset32 += 4;
3829 ret_buf += 4;
3830 len32 -= 4;
3831 }
3832
3833 if (rc)
3834 return rc;
3835
3836 cmd_flags = BNX2_NVM_COMMAND_LAST;
3837 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3838
3839 memcpy(ret_buf, buf, 4 - extra);
3840 }
3841
3842 /* Disable access to flash interface */
3843 bnx2_disable_nvram_access(bp);
3844
3845 bnx2_release_nvram_lock(bp);
3846
3847 return rc;
3848}
3849
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned edges are handled read-modify-write style: the dwords
 * surrounding the start and end are read first and merged with the new
 * data into a dword-aligned bounce buffer (@align_buf).  The write then
 * proceeds page by page; for non-buffered flash each page is read into
 * @flash_buffer, erased, and rewritten with the untouched head/tail
 * restored around the new data.  The NVRAM lock is taken and released
 * per page.
 *
 * Returns 0 on success or a negative error code.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: capture the dword we will partially overwrite. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: capture the trailing dword as well. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge old edge bytes and the new data into one aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a whole-page staging buffer.  264
	 * bytes -- presumably the largest supported page size; confirm
	 * against flash_table.
	 */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both buffers are safe to free here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4029
0d8a6571
MC
4030static void
4031bnx2_init_remote_phy(struct bnx2 *bp)
4032{
4033 u32 val;
4034
4035 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4036 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4037 return;
4038
4039 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4040 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4041 return;
4042
4043 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
0d8a6571
MC
4044 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4045
4046 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4047 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4048 bp->phy_port = PORT_FIBRE;
4049 else
4050 bp->phy_port = PORT_TP;
489310a4
MC
4051
4052 if (netif_running(bp->dev)) {
4053 u32 sig;
4054
4055 if (val & BNX2_LINK_STATUS_LINK_UP) {
4056 bp->link_up = 1;
4057 netif_carrier_on(bp->dev);
4058 } else {
4059 bp->link_up = 0;
4060 netif_carrier_off(bp->dev);
4061 }
4062 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4063 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4064 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4065 sig);
4066 }
0d8a6571
MC
4067 }
4068}
4069
/* Perform a coordinated soft reset of the chip.
 *
 * The sequence: quiesce DMA/coalescing, handshake with the firmware
 * (WAIT0), deposit a soft-reset signature, reset the core (the 5709
 * uses MISC_COMMAND, older chips the PCICFG reset with per-revision
 * workarounds), verify endian configuration, wait for firmware init
 * (WAIT1), re-probe remote-PHY state, and apply 5706 A0 errata fixes.
 *
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason passed to the firmware.
 * Returns 0 on success or a negative error code.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via MISC_COMMAND; the read-back flushes the
		 * posted write before the settle delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	/* Re-probe remote PHY; if the port type changed, reprogram the
	 * default remote link settings. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_remote_phy(bp);
	if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
4171
4172static int
4173bnx2_init_chip(struct bnx2 *bp)
4174{
4175 u32 val;
b090ae2b 4176 int rc;
b6016b76
MC
4177
4178 /* Make sure the interrupt is not active. */
4179 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4180
4181 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4182 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4183#ifdef __BIG_ENDIAN
6aa20a22 4184 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
b6016b76 4185#endif
6aa20a22 4186 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
b6016b76
MC
4187 DMA_READ_CHANS << 12 |
4188 DMA_WRITE_CHANS << 16;
4189
4190 val |= (0x2 << 20) | (1 << 11);
4191
dda1e390 4192 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
b6016b76
MC
4193 val |= (1 << 23);
4194
4195 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4196 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
4197 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4198
4199 REG_WR(bp, BNX2_DMA_CONFIG, val);
4200
4201 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4202 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4203 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4204 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4205 }
4206
4207 if (bp->flags & PCIX_FLAG) {
4208 u16 val16;
4209
4210 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4211 &val16);
4212 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4213 val16 & ~PCI_X_CMD_ERO);
4214 }
4215
4216 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4217 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4218 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4219 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4220
4221 /* Initialize context mapping and zero out the quick contexts. The
4222 * context block must have already been enabled. */
641bdcd5
MC
4223 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4224 rc = bnx2_init_5709_context(bp);
4225 if (rc)
4226 return rc;
4227 } else
59b47d8a 4228 bnx2_init_context(bp);
b6016b76 4229
fba9fe91
MC
4230 if ((rc = bnx2_init_cpus(bp)) != 0)
4231 return rc;
4232
b6016b76
MC
4233 bnx2_init_nvram(bp);
4234
4235 bnx2_set_mac_addr(bp);
4236
4237 val = REG_RD(bp, BNX2_MQ_CONFIG);
4238 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4239 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
68c9f75a
MC
4240 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4241 val |= BNX2_MQ_CONFIG_HALT_DIS;
4242
b6016b76
MC
4243 REG_WR(bp, BNX2_MQ_CONFIG, val);
4244
4245 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4246 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4247 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4248
4249 val = (BCM_PAGE_BITS - 8) << 24;
4250 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4251
4252 /* Configure page size. */
4253 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4254 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4255 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4256 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4257
4258 val = bp->mac_addr[0] +
4259 (bp->mac_addr[1] << 8) +
4260 (bp->mac_addr[2] << 16) +
4261 bp->mac_addr[3] +
4262 (bp->mac_addr[4] << 8) +
4263 (bp->mac_addr[5] << 16);
4264 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4265
4266 /* Program the MTU. Also include 4 bytes for CRC32. */
4267 val = bp->dev->mtu + ETH_HLEN + 4;
4268 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4269 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4270 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4271
35efa7c1 4272 bp->bnx2_napi.last_status_idx = 0;
b6016b76
MC
4273 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4274
4275 /* Set up how to generate a link change interrupt. */
4276 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4277
4278 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4279 (u64) bp->status_blk_mapping & 0xffffffff);
4280 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4281
4282 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4283 (u64) bp->stats_blk_mapping & 0xffffffff);
4284 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4285 (u64) bp->stats_blk_mapping >> 32);
4286
6aa20a22 4287 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
b6016b76
MC
4288 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4289
4290 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4291 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4292
4293 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4294 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4295
4296 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4297
4298 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4299
4300 REG_WR(bp, BNX2_HC_COM_TICKS,
4301 (bp->com_ticks_int << 16) | bp->com_ticks);
4302
4303 REG_WR(bp, BNX2_HC_CMD_TICKS,
4304 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4305
02537b06
MC
4306 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4307 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4308 else
7ea6920e 4309 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
b6016b76
MC
4310 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4311
4312 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
8e6a72c4 4313 val = BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76 4314 else {
8e6a72c4
MC
4315 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4316 BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76
MC
4317 }
4318
8e6a72c4
MC
4319 if (bp->flags & ONE_SHOT_MSI_FLAG)
4320 val |= BNX2_HC_CONFIG_ONE_SHOT;
4321
4322 REG_WR(bp, BNX2_HC_CONFIG, val);
4323
b6016b76
MC
4324 /* Clear internal stats counters. */
4325 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4326
da3e4fbe 4327 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
b6016b76
MC
4328
4329 /* Initialize the receive filter. */
4330 bnx2_set_rx_mode(bp->dev);
4331
0aa38df7
MC
4332 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4333 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4334 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4335 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4336 }
b090ae2b
MC
4337 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4338 0);
b6016b76 4339
df149d70 4340 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
b6016b76
MC
4341 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4342
4343 udelay(20);
4344
bf5295bb
MC
4345 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4346
b090ae2b 4347 return rc;
b6016b76
MC
4348}
4349
59b47d8a
MC
/* Program the TX L2 context for connection ID @cid: context type/size,
 * command type, and the high/low halves of the TX BD chain base address
 * (bp->tx_desc_mapping).  The 5709 (Xinan) uses a different set of
 * context offsets than earlier chips.
 */
4350static void
4351bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4352{
4353 u32 val, offset0, offset1, offset2, offset3;
4354
4355 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4356 offset0 = BNX2_L2CTX_TYPE_XI;
4357 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4358 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4359 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4360 } else {
4361 offset0 = BNX2_L2CTX_TYPE;
4362 offset1 = BNX2_L2CTX_CMD_TYPE;
4363 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4364 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4365 }
4366 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4367 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4368
4369 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4370 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4371
4372 val = (u64) bp->tx_desc_mapping >> 32;
4373 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4374
4375 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4376 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4377}
b6016b76
MC
4378
/* Reset TX ring software state: zero the producer/consumer indices,
 * point the last BD of the ring back at the ring base so the chain
 * wraps, record the doorbell mailbox addresses for BIDX/BSEQ, and
 * program the TX context for TX_CID.
 */
4379static void
4380bnx2_init_tx_ring(struct bnx2 *bp)
4381{
4382 struct tx_bd *txbd;
59b47d8a 4383 u32 cid;
b6016b76 4384
2f8af120
MC
/* Wake the queue once half the ring is free again. */
4385 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4386
b6016b76 4387 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4388
b6016b76
MC
4389 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4390 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4391
4392 bp->tx_prod = 0;
4393 bp->tx_cons = 0;
f4e418f7 4394 bp->hw_tx_cons = 0;
b6016b76 4395 bp->tx_prod_bseq = 0;
6aa20a22 4396
59b47d8a
MC
4397 cid = TX_CID;
4398 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4399 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4400
59b47d8a 4401 bnx2_init_tx_context(bp, cid);
b6016b76
MC
4402}
4403
/* Link @num_rings RX BD pages into one circular chain.  Every BD gets
 * the buffer length and START|END flags; the last BD of each page is
 * pointed at the DMA address of the next page, and the final page
 * wraps back to page 0.
 */
4404static void
5d5d0015
MC
4405bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4406 int num_rings)
b6016b76 4407{
b6016b76 4408 int i;
5d5d0015 4409 struct rx_bd *rxbd;
6aa20a22 4410
5d5d0015 4411 for (i = 0; i < num_rings; i++) {
13daffa2 4412 int j;
b6016b76 4413
5d5d0015 4414 rxbd = &rx_ring[i][0];
13daffa2 4415 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 4416 rxbd->rx_bd_len = buf_size;
13daffa2
MC
4417 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4418 }
/* rxbd now addresses the page's final BD; chain it to the next page
 * (or back to page 0 on the last page). */
5d5d0015 4419 if (i == (num_rings - 1))
13daffa2
MC
4420 j = 0;
4421 else
4422 j = i + 1;
5d5d0015
MC
4423 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4424 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 4425 }
5d5d0015
MC
4426}
4427
/* Initialize the RX BD ring (and the RX page ring when jumbo buffers
 * are in use): reset producer/consumer state, chain the BD pages,
 * program the RX context (type, page-buffer sizes, chain base
 * addresses), pre-fill the rings with buffers, and finally publish the
 * producer indices/bseq to the chip mailboxes.
 */
4428static void
4429bnx2_init_rx_ring(struct bnx2 *bp)
4430{
4431 int i;
4432 u16 prod, ring_prod;
4433 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4434
4435 bp->rx_prod = 0;
4436 bp->rx_cons = 0;
4437 bp->rx_prod_bseq = 0;
47bf4246
MC
4438 bp->rx_pg_prod = 0;
4439 bp->rx_pg_cons = 0;
5d5d0015
MC
4440
4441 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4442 bp->rx_buf_use_size, bp->rx_max_ring);
4443
4444 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246
MC
/* Jumbo path: set up the separate page ring and its context fields. */
4445 if (bp->rx_pg_ring_size) {
4446 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4447 bp->rx_pg_desc_mapping,
4448 PAGE_SIZE, bp->rx_max_pg_ring);
4449 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4450 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4451 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4452 BNX2_L2CTX_RBDC_JUMBO_KEY);
4453
4454 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4455 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4456
4457 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4458 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4459
4460 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4461 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4462 }
b6016b76
MC
4463
4464 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4465 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4466 val |= 0x02 << 8;
5d5d0015 4467 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
b6016b76 4468
13daffa2 4469 val = (u64) bp->rx_desc_mapping[0] >> 32;
5d5d0015 4470 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 4471
13daffa2 4472 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
5d5d0015 4473 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 4474
47bf4246
MC
/* Pre-fill the page ring; stop early on allocation failure. */
4475 ring_prod = prod = bp->rx_pg_prod;
4476 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4477 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4478 break;
4479 prod = NEXT_RX_BD(prod);
4480 ring_prod = RX_PG_RING_IDX(prod);
4481 }
4482 bp->rx_pg_prod = prod;
4483
/* Pre-fill the main RX ring with skbs. */
5d5d0015 4484 ring_prod = prod = bp->rx_prod;
236b6394 4485 for (i = 0; i < bp->rx_ring_size; i++) {
b6016b76
MC
4486 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4487 break;
4488 }
4489 prod = NEXT_RX_BD(prod);
4490 ring_prod = RX_RING_IDX(prod);
4491 }
4492 bp->rx_prod = prod;
4493
47bf4246 4494 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
b6016b76
MC
4495 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4496
4497 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4498}
4499
5d5d0015 4500static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 4501{
5d5d0015 4502 u32 max, num_rings = 1;
13daffa2 4503
5d5d0015
MC
4504 while (ring_size > MAX_RX_DESC_CNT) {
4505 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
4506 num_rings++;
4507 }
4508 /* round to next power of 2 */
5d5d0015 4509 max = max_size;
13daffa2
MC
4510 while ((max & num_rings) == 0)
4511 max >>= 1;
4512
4513 if (num_rings != max)
4514 max <<= 1;
4515
5d5d0015
MC
4516 return max;
4517}
4518
/* Compute RX buffer geometry for the current MTU.  If a full MTU-sized
 * skb (plus alignment and skb_shared_info overhead) would exceed one
 * page, switch to the split header/page model: small RX buffers for
 * headers plus a separate page ring for the rest of the frame.  Sets
 * rx_buf_use_size, rx_buf_size, rx_jumbo_thresh and the sizes/index
 * masks of both the main and the page rings.
 */
4519static void
4520bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4521{
84eaa187 4522 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
4523
4524 /* 8 for CRC and VLAN */
4525 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4526
84eaa187
MC
4527 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4528 sizeof(struct skb_shared_info);
4529
5d5d0015 4530 bp->rx_copy_thresh = RX_COPY_THRESH;
47bf4246
MC
4531 bp->rx_pg_ring_size = 0;
4532 bp->rx_max_pg_ring = 0;
4533 bp->rx_max_pg_ring_idx = 0;
84eaa187
MC
/* Frame won't fit in one page: enable the page ring (jumbo path). */
4534 if (rx_space > PAGE_SIZE) {
4535 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4536
4537 jumbo_size = size * pages;
4538 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4539 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4540
4541 bp->rx_pg_ring_size = jumbo_size;
4542 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4543 MAX_RX_PG_RINGS);
4544 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4545 rx_size = RX_COPY_THRESH + bp->rx_offset;
4546 bp->rx_copy_thresh = 0;
4547 }
5d5d0015
MC
4548
4549 bp->rx_buf_use_size = rx_size;
4550 /* hw alignment */
4551 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
1db82f2a 4552 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
5d5d0015
MC
4553 bp->rx_ring_size = size;
4554 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
13daffa2
MC
4555 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4556}
4557
b6016b76
MC
/* Release every skb still queued on the TX ring: unmap the head DMA
 * mapping and each fragment page, then free the skb.  A TX packet
 * occupies 1 + nr_frags consecutive ring slots, hence the i += j + 1
 * stride.  No-op when the ring was never allocated.
 */
4558static void
4559bnx2_free_tx_skbs(struct bnx2 *bp)
4560{
4561 int i;
4562
4563 if (bp->tx_buf_ring == NULL)
4564 return;
4565
4566 for (i = 0; i < TX_DESC_CNT; ) {
4567 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4568 struct sk_buff *skb = tx_buf->skb;
4569 int j, last;
4570
4571 if (skb == NULL) {
4572 i++;
4573 continue;
4574 }
4575
4576 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4577 skb_headlen(skb), PCI_DMA_TODEVICE);
4578
4579 tx_buf->skb = NULL;
4580
/* Fragments live in the slots immediately after the head BD. */
4581 last = skb_shinfo(skb)->nr_frags;
4582 for (j = 0; j < last; j++) {
4583 tx_buf = &bp->tx_buf_ring[i + j + 1];
4584 pci_unmap_page(bp->pdev,
4585 pci_unmap_addr(tx_buf, mapping),
4586 skb_shinfo(skb)->frags[j].size,
4587 PCI_DMA_TODEVICE);
4588 }
745720e5 4589 dev_kfree_skb(skb);
b6016b76
MC
4590 i += j + 1;
4591 }
4592
4593}
4594
/* Release every buffer still posted on the RX rings: unmap and free
 * each skb in the main ring, then free each page in the RX page ring.
 * No-op when the ring was never allocated.
 */
4595static void
4596bnx2_free_rx_skbs(struct bnx2 *bp)
4597{
4598 int i;
4599
4600 if (bp->rx_buf_ring == NULL)
4601 return;
4602
13daffa2 4603 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4604 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4605 struct sk_buff *skb = rx_buf->skb;
4606
05d0f1cf 4607 if (skb == NULL)
b6016b76
MC
4608 continue;
4609
4610 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4611 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4612
4613 rx_buf->skb = NULL;
4614
745720e5 4615 dev_kfree_skb(skb);
b6016b76 4616 }
47bf4246
MC
4617 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4618 bnx2_free_rx_page(bp, i);
b6016b76
MC
4619}
4620
/* Free all buffers on both the TX and RX rings. */
4621static void
4622bnx2_free_skbs(struct bnx2 *bp)
4623{
4624 bnx2_free_tx_skbs(bp);
4625 bnx2_free_rx_skbs(bp);
4626}
4627
/* Full reset cycle: reset the chip with @reset_code, drop every queued
 * buffer, then re-initialize the chip and both rings.  Returns 0 or a
 * negative errno from the reset/init steps.  Note the skbs are freed
 * even when the chip reset itself failed.
 */
4628static int
4629bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4630{
4631 int rc;
4632
4633 rc = bnx2_reset_chip(bp, reset_code);
4634 bnx2_free_skbs(bp);
4635 if (rc)
4636 return rc;
4637
fba9fe91
MC
4638 if ((rc = bnx2_init_chip(bp)) != 0)
4639 return rc;
4640
b6016b76
MC
4641 bnx2_init_tx_ring(bp);
4642 bnx2_init_rx_ring(bp);
4643 return 0;
4644}
4645
/* Bring the NIC to an operational state: reset/re-init the chip and
 * rings, then (re)initialize the PHY and link under phy_lock.
 */
4646static int
4647bnx2_init_nic(struct bnx2 *bp)
4648{
4649 int rc;
4650
4651 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4652 return rc;
4653
80be4434 4654 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4655 bnx2_init_phy(bp);
4656 bnx2_set_link(bp);
0d8a6571 4657 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4658 return 0;
4659}
4660
/* Ethtool self-test: walk a table of registers, each with a
 * read/write mask (bits that must accept 0 and 1) and a read-only mask
 * (bits that must survive writes unchanged).  The original value is
 * restored after each probe.  Entries flagged BNX2_FL_NOT_5709 are
 * skipped on 5709 parts.  Returns 0 on success, -ENODEV on the first
 * mismatch.
 */
4661static int
4662bnx2_test_registers(struct bnx2 *bp)
4663{
4664 int ret;
5bae30c9 4665 int i, is_5709;
f71e1309 4666 static const struct {
b6016b76
MC
4667 u16 offset;
4668 u16 flags;
5bae30c9 4669#define BNX2_FL_NOT_5709 1
b6016b76
MC
4670 u32 rw_mask;
4671 u32 ro_mask;
4672 } reg_tbl[] = {
4673 { 0x006c, 0, 0x00000000, 0x0000003f },
4674 { 0x0090, 0, 0xffffffff, 0x00000000 },
4675 { 0x0094, 0, 0x00000000, 0x00000000 },
4676
5bae30c9
MC
4677 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4678 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4679 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4680 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4681 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4682 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4683 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4684 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4685 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4686
4687 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4688 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4689 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4690 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4691 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4692 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4693
4694 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4695 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4696 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
4697
4698 { 0x1000, 0, 0x00000000, 0x00000001 },
4699 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
4700
4701 { 0x1408, 0, 0x01c00800, 0x00000000 },
4702 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4703 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 4704 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
4705 { 0x14b0, 0, 0x00000002, 0x00000001 },
4706 { 0x14b8, 0, 0x00000000, 0x00000000 },
4707 { 0x14c0, 0, 0x00000000, 0x00000009 },
4708 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4709 { 0x14cc, 0, 0x00000000, 0x00000001 },
4710 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
4711
4712 { 0x1800, 0, 0x00000000, 0x00000001 },
4713 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
4714
4715 { 0x2800, 0, 0x00000000, 0x00000001 },
4716 { 0x2804, 0, 0x00000000, 0x00003f01 },
4717 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4718 { 0x2810, 0, 0xffff0000, 0x00000000 },
4719 { 0x2814, 0, 0xffff0000, 0x00000000 },
4720 { 0x2818, 0, 0xffff0000, 0x00000000 },
4721 { 0x281c, 0, 0xffff0000, 0x00000000 },
4722 { 0x2834, 0, 0xffffffff, 0x00000000 },
4723 { 0x2840, 0, 0x00000000, 0xffffffff },
4724 { 0x2844, 0, 0x00000000, 0xffffffff },
4725 { 0x2848, 0, 0xffffffff, 0x00000000 },
4726 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4727
4728 { 0x2c00, 0, 0x00000000, 0x00000011 },
4729 { 0x2c04, 0, 0x00000000, 0x00030007 },
4730
b6016b76
MC
4731 { 0x3c00, 0, 0x00000000, 0x00000001 },
4732 { 0x3c04, 0, 0x00000000, 0x00070000 },
4733 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4734 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4735 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4736 { 0x3c14, 0, 0x00000000, 0xffffffff },
4737 { 0x3c18, 0, 0x00000000, 0xffffffff },
4738 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4739 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
4740
4741 { 0x5004, 0, 0x00000000, 0x0000007f },
4742 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 4743
b6016b76
MC
4744 { 0x5c00, 0, 0x00000000, 0x00000001 },
4745 { 0x5c04, 0, 0x00000000, 0x0003000f },
4746 { 0x5c08, 0, 0x00000003, 0x00000000 },
4747 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4748 { 0x5c10, 0, 0x00000000, 0xffffffff },
4749 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4750 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4751 { 0x5c88, 0, 0x00000000, 0x00077373 },
4752 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4753
4754 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4755 { 0x680c, 0, 0xffffffff, 0x00000000 },
4756 { 0x6810, 0, 0xffffffff, 0x00000000 },
4757 { 0x6814, 0, 0xffffffff, 0x00000000 },
4758 { 0x6818, 0, 0xffffffff, 0x00000000 },
4759 { 0x681c, 0, 0xffffffff, 0x00000000 },
4760 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4761 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4762 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4763 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4764 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4765 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4766 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4767 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4768 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4769 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4770 { 0x684c, 0, 0xffffffff, 0x00000000 },
4771 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4772 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4773 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4774 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4775 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4776 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4777
/* 0xffff terminates the table. */
4778 { 0xffff, 0, 0x00000000, 0x00000000 },
4779 };
4780
4781 ret = 0;
5bae30c9
MC
4782 is_5709 = 0;
4783 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4784 is_5709 = 1;
4785
b6016b76
MC
4786 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4787 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
4788 u16 flags = reg_tbl[i].flags;
4789
4790 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4791 continue;
b6016b76
MC
4792
4793 offset = (u32) reg_tbl[i].offset;
4794 rw_mask = reg_tbl[i].rw_mask;
4795 ro_mask = reg_tbl[i].ro_mask;
4796
14ab9b86 4797 save_val = readl(bp->regview + offset);
b6016b76 4798
/* Write 0: rw bits must read back 0, ro bits must be unchanged. */
14ab9b86 4799 writel(0, bp->regview + offset);
b6016b76 4800
14ab9b86 4801 val = readl(bp->regview + offset);
b6016b76
MC
4802 if ((val & rw_mask) != 0) {
4803 goto reg_test_err;
4804 }
4805
4806 if ((val & ro_mask) != (save_val & ro_mask)) {
4807 goto reg_test_err;
4808 }
4809
/* Write all-ones: rw bits must read back 1, ro bits unchanged. */
14ab9b86 4810 writel(0xffffffff, bp->regview + offset);
b6016b76 4811
14ab9b86 4812 val = readl(bp->regview + offset);
b6016b76
MC
4813 if ((val & rw_mask) != rw_mask) {
4814 goto reg_test_err;
4815 }
4816
4817 if ((val & ro_mask) != (save_val & ro_mask)) {
4818 goto reg_test_err;
4819 }
4820
14ab9b86 4821 writel(save_val, bp->regview + offset);
b6016b76
MC
4822 continue;
4823
4824reg_test_err:
14ab9b86 4825 writel(save_val, bp->regview + offset);
b6016b76
MC
4826 ret = -ENODEV;
4827 break;
4828 }
4829 return ret;
4830}
4831
/* Write each test pattern to every 32-bit word of the on-chip memory
 * window [start, start+size) via indirect register access and read it
 * back.  Returns 0 on success, -ENODEV on the first mismatch.
 */
4832static int
4833bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4834{
f71e1309 4835 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4836 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4837 int i;
4838
4839 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4840 u32 offset;
4841
4842 for (offset = 0; offset < size; offset += 4) {
4843
4844 REG_WR_IND(bp, start + offset, test_pattern[i]);
4845
4846 if (REG_RD_IND(bp, start + offset) !=
4847 test_pattern[i]) {
4848 return -ENODEV;
4849 }
4850 }
4851 }
4852 return 0;
4853}
4854
/* Ethtool memory self-test: run bnx2_do_mem_test() over each on-chip
 * memory region in the chip-specific table (the 5709 table omits the
 * 0x160000 region).  Tables are terminated by offset 0xffffffff.
 */
4855static int
4856bnx2_test_memory(struct bnx2 *bp)
4857{
4858 int ret = 0;
4859 int i;
5bae30c9 4860 static struct mem_entry {
b6016b76
MC
4861 u32 offset;
4862 u32 len;
5bae30c9 4863 } mem_tbl_5706[] = {
b6016b76 4864 { 0x60000, 0x4000 },
5b0c76ad 4865 { 0xa0000, 0x3000 },
b6016b76
MC
4866 { 0xe0000, 0x4000 },
4867 { 0x120000, 0x4000 },
4868 { 0x1a0000, 0x4000 },
4869 { 0x160000, 0x4000 },
4870 { 0xffffffff, 0 },
5bae30c9
MC
4871 },
4872 mem_tbl_5709[] = {
4873 { 0x60000, 0x4000 },
4874 { 0xa0000, 0x3000 },
4875 { 0xe0000, 0x4000 },
4876 { 0x120000, 0x4000 },
4877 { 0x1a0000, 0x4000 },
4878 { 0xffffffff, 0 },
b6016b76 4879 };
5bae30c9
MC
4880 struct mem_entry *mem_tbl;
4881
4882 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4883 mem_tbl = mem_tbl_5709;
4884 else
4885 mem_tbl = mem_tbl_5706;
b6016b76
MC
4886
4887 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4888 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4889 mem_tbl[i].len)) != 0) {
4890 return ret;
4891 }
4892 }
6aa20a22 4893
b6016b76
MC
4894 return ret;
4895}
4896
bc5a0690
MC
4897#define BNX2_MAC_LOOPBACK 0
4898#define BNX2_PHY_LOOPBACK 1
4899
/* Loopback self-test: put the MAC or PHY into loopback, hand-craft one
 * frame, post it directly on the TX ring, force interrupt coalescing
 * events, and verify the frame comes back on the RX ring intact (no
 * l2_fhdr errors, right length, right payload).  Returns 0 on success,
 * -ENODEV on any mismatch, -EINVAL for an unknown mode.  PHY loopback
 * is skipped (returns 0) when a remote PHY is managing the link.
 */
b6016b76 4900static int
bc5a0690 4901bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
4902{
4903 unsigned int pkt_size, num_pkts, i;
4904 struct sk_buff *skb, *rx_skb;
4905 unsigned char *packet;
bc5a0690 4906 u16 rx_start_idx, rx_idx;
b6016b76
MC
4907 dma_addr_t map;
4908 struct tx_bd *txbd;
4909 struct sw_bd *rx_buf;
4910 struct l2_fhdr *rx_hdr;
4911 int ret = -ENODEV;
35efa7c1 4912 struct bnx2_napi *bnapi = &bp->bnx2_napi;
b6016b76 4913
bc5a0690
MC
4914 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4915 bp->loopback = MAC_LOOPBACK;
4916 bnx2_set_mac_loopback(bp);
4917 }
4918 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
489310a4
MC
4919 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4920 return 0;
4921
80be4434 4922 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
4923 bnx2_set_phy_loopback(bp);
4924 }
4925 else
4926 return -EINVAL;
b6016b76 4927
/* Build a test frame: dest = our MAC, then a counting byte pattern. */
84eaa187 4928 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 4929 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
4930 if (!skb)
4931 return -ENOMEM;
b6016b76 4932 packet = skb_put(skb, pkt_size);
6634292b 4933 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
4934 memset(packet + 6, 0x0, 8);
4935 for (i = 14; i < pkt_size; i++)
4936 packet[i] = (unsigned char) (i & 0xff);
4937
4938 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4939 PCI_DMA_TODEVICE);
4940
/* Force a coalescing event so the status block indices are current. */
bf5295bb
MC
4941 REG_WR(bp, BNX2_HC_COMMAND,
4942 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4943
b6016b76
MC
4944 REG_RD(bp, BNX2_HC_COMMAND);
4945
4946 udelay(5);
35efa7c1 4947 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 4948
b6016b76
MC
4949 num_pkts = 0;
4950
/* Post the frame directly on the TX ring and ring the doorbell. */
bc5a0690 4951 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
4952
4953 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4954 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4955 txbd->tx_bd_mss_nbytes = pkt_size;
4956 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4957
4958 num_pkts++;
bc5a0690
MC
4959 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4960 bp->tx_prod_bseq += pkt_size;
b6016b76 4961
234754d5
MC
4962 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4963 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4964
4965 udelay(100);
4966
bf5295bb
MC
4967 REG_WR(bp, BNX2_HC_COMMAND,
4968 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4969
b6016b76
MC
4970 REG_RD(bp, BNX2_HC_COMMAND);
4971
4972 udelay(5);
4973
4974 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 4975 dev_kfree_skb(skb);
b6016b76 4976
/* The frame must have been consumed by TX and arrived on RX. */
35efa7c1 4977 if (bnx2_get_hw_tx_cons(bnapi) != bp->tx_prod)
b6016b76 4978 goto loopback_test_done;
b6016b76 4979
35efa7c1 4980 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
4981 if (rx_idx != rx_start_idx + num_pkts) {
4982 goto loopback_test_done;
4983 }
4984
4985 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4986 rx_skb = rx_buf->skb;
4987
4988 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4989 skb_reserve(rx_skb, bp->rx_offset);
4990
4991 pci_dma_sync_single_for_cpu(bp->pdev,
4992 pci_unmap_addr(rx_buf, mapping),
4993 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4994
ade2bfe7 4995 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
4996 (L2_FHDR_ERRORS_BAD_CRC |
4997 L2_FHDR_ERRORS_PHY_DECODE |
4998 L2_FHDR_ERRORS_ALIGNMENT |
4999 L2_FHDR_ERRORS_TOO_SHORT |
5000 L2_FHDR_ERRORS_GIANT_FRAME)) {
5001
5002 goto loopback_test_done;
5003 }
5004
/* Length check: received length includes the 4-byte CRC. */
5005 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5006 goto loopback_test_done;
5007 }
5008
5009 for (i = 14; i < pkt_size; i++) {
5010 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5011 goto loopback_test_done;
5012 }
5013 }
5014
5015 ret = 0;
5016
5017loopback_test_done:
5018 bp->loopback = 0;
5019 return ret;
5020}
5021
bc5a0690
MC
5022#define BNX2_MAC_LOOPBACK_FAILED 1
5023#define BNX2_PHY_LOOPBACK_FAILED 2
5024#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5025 BNX2_PHY_LOOPBACK_FAILED)
5026
/* Run both MAC and PHY loopback tests after a NIC reset and PHY
 * re-init.  Returns a bitmask of BNX2_*_LOOPBACK_FAILED flags (0 on
 * full success); everything fails if the interface is down.
 */
5027static int
5028bnx2_test_loopback(struct bnx2 *bp)
5029{
5030 int rc = 0;
5031
5032 if (!netif_running(bp->dev))
5033 return BNX2_LOOPBACK_FAILED;
5034
5035 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5036 spin_lock_bh(&bp->phy_lock);
5037 bnx2_init_phy(bp);
5038 spin_unlock_bh(&bp->phy_lock);
5039 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5040 rc |= BNX2_MAC_LOOPBACK_FAILED;
5041 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5042 rc |= BNX2_PHY_LOOPBACK_FAILED;
5043 return rc;
5044}
5045
b6016b76
MC
5046#define NVRAM_SIZE 0x200
5047#define CRC32_RESIDUAL 0xdebb20e3
5048
/* NVRAM self-test: check the 0x669955aa signature word at offset 0,
 * then verify the CRC32 residual of both 256-byte halves of the
 * manufacturing-info block at offset 0x100.  Returns 0 on success,
 * -ENODEV on bad signature/checksum, or a read error code.
 */
5049static int
5050bnx2_test_nvram(struct bnx2 *bp)
5051{
5052 u32 buf[NVRAM_SIZE / 4];
5053 u8 *data = (u8 *) buf;
5054 int rc = 0;
5055 u32 magic, csum;
5056
5057 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5058 goto test_nvram_done;
5059
5060 magic = be32_to_cpu(buf[0]);
5061 if (magic != 0x669955aa) {
5062 rc = -ENODEV;
5063 goto test_nvram_done;
5064 }
5065
5066 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5067 goto test_nvram_done;
5068
5069 csum = ether_crc_le(0x100, data);
5070 if (csum != CRC32_RESIDUAL) {
5071 rc = -ENODEV;
5072 goto test_nvram_done;
5073 }
5074
5075 csum = ether_crc_le(0x100, data + 0x100);
5076 if (csum != CRC32_RESIDUAL) {
5077 rc = -ENODEV;
5078 }
5079
5080test_nvram_done:
5081 return rc;
5082}
5083
/* Link self-test: with a remote PHY, trust the cached link_up state;
 * otherwise read BMSR1 twice (it latches, so the second read reflects
 * current status) under phy_lock.  Returns 0 when the link is up,
 * -ENODEV otherwise.
 */
5084static int
5085bnx2_test_link(struct bnx2 *bp)
5086{
5087 u32 bmsr;
5088
489310a4
MC
5089 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5090 if (bp->link_up)
5091 return 0;
5092 return -ENODEV;
5093 }
c770a65c 5094 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5095 bnx2_enable_bmsr1(bp);
5096 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5097 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5098 bnx2_disable_bmsr1(bp);
c770a65c 5099 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5100
b6016b76
MC
5101 if (bmsr & BMSR_LSTATUS) {
5102 return 0;
5103 }
5104 return -ENODEV;
5105}
5106
/* Interrupt self-test (used to validate MSI delivery): snapshot the
 * interrupt-ack status index, force a coalescing event, then poll up
 * to ~100ms for the index to advance.  Returns 0 if an interrupt was
 * seen, -ENODEV on timeout or if the interface is down.
 */
5107static int
5108bnx2_test_intr(struct bnx2 *bp)
5109{
5110 int i;
b6016b76
MC
5111 u16 status_idx;
5112
5113 if (!netif_running(bp->dev))
5114 return -ENODEV;
5115
5116 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5117
5118 /* This register is not touched during run-time. */
bf5295bb 5119 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5120 REG_RD(bp, BNX2_HC_COMMAND);
5121
5122 for (i = 0; i < 10; i++) {
5123 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5124 status_idx) {
5125
5126 break;
5127 }
5128
5129 msleep_interruptible(10);
5130 }
5131 if (i < 10)
5132 return 0;
5133
5134 return -ENODEV;
5135}
5136
/* Periodic SerDes state machine for the 5706, run under phy_lock from
 * bnx2_timer().  While autoneg is pending the countdown just ticks.
 * With link down and autoneg enabled: if the vendor-specific PHY
 * registers show signal-detect but no incoming config, force
 * 1000/full (parallel detect).  With link up in parallel-detect mode:
 * re-enable autoneg once the partner starts sending config.
 */
5137static void
48b01e2d 5138bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5139{
48b01e2d
MC
5140 spin_lock(&bp->phy_lock);
5141 if (bp->serdes_an_pending)
5142 bp->serdes_an_pending--;
5143 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5144 u32 bmcr;
b6016b76 5145
48b01e2d 5146 bp->current_interval = bp->timer_interval;
cd339a0e 5147
ca58c3af 5148 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5149
48b01e2d
MC
5150 if (bmcr & BMCR_ANENABLE) {
5151 u32 phy1, phy2;
b6016b76 5152
/* Shadow-register reads; 0x1c/0x17/0x15 are vendor-specific
 * PHY access windows (values presumably from Broadcom docs). */
48b01e2d
MC
5153 bnx2_write_phy(bp, 0x1c, 0x7c00);
5154 bnx2_read_phy(bp, 0x1c, &phy1);
cea94db9 5155
48b01e2d
MC
5156 bnx2_write_phy(bp, 0x17, 0x0f01);
5157 bnx2_read_phy(bp, 0x15, &phy2);
5158 bnx2_write_phy(bp, 0x17, 0x0f01);
5159 bnx2_read_phy(bp, 0x15, &phy2);
b6016b76 5160
48b01e2d
MC
5161 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5162 !(phy2 & 0x20)) { /* no CONFIG */
5163
5164 bmcr &= ~BMCR_ANENABLE;
5165 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 5166 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
48b01e2d
MC
5167 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5168 }
b6016b76 5169 }
48b01e2d
MC
5170 }
5171 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5172 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5173 u32 phy2;
b6016b76 5174
/* Partner now sending config: fall back to autonegotiation. */
48b01e2d
MC
5175 bnx2_write_phy(bp, 0x17, 0x0f01);
5176 bnx2_read_phy(bp, 0x15, &phy2);
5177 if (phy2 & 0x20) {
5178 u32 bmcr;
cd339a0e 5179
ca58c3af 5180 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 5181 bmcr |= BMCR_ANENABLE;
ca58c3af 5182 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 5183
48b01e2d
MC
5184 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5185 }
5186 } else
5187 bp->current_interval = bp->timer_interval;
b6016b76 5188
48b01e2d
MC
5189 spin_unlock(&bp->phy_lock);
5190}
b6016b76 5191
f8dd064e
MC
/* Periodic SerDes state machine for the 5708, run under phy_lock from
 * bnx2_timer().  Only relevant for 2.5G-capable PHYs without a remote
 * PHY.  With link down and autoneg enabled, alternate between forced
 * 2.5G mode and autonegotiation, adjusting the timer interval to pace
 * the toggling.
 */
5192static void
5193bnx2_5708_serdes_timer(struct bnx2 *bp)
5194{
0d8a6571
MC
5195 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5196 return;
5197
f8dd064e
MC
5198 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5199 bp->serdes_an_pending = 0;
5200 return;
5201 }
b6016b76 5202
f8dd064e
MC
5203 spin_lock(&bp->phy_lock);
5204 if (bp->serdes_an_pending)
5205 bp->serdes_an_pending--;
5206 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5207 u32 bmcr;
b6016b76 5208
ca58c3af 5209 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 5210 if (bmcr & BMCR_ANENABLE) {
605a9e20 5211 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
5212 bp->current_interval = SERDES_FORCED_TIMEOUT;
5213 } else {
605a9e20 5214 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
5215 bp->serdes_an_pending = 2;
5216 bp->current_interval = bp->timer_interval;
b6016b76 5217 }
b6016b76 5218
f8dd064e
MC
5219 } else
5220 bp->current_interval = bp->timer_interval;
b6016b76 5221
f8dd064e
MC
5222 spin_unlock(&bp->phy_lock);
5223}
5224
48b01e2d
MC
/* Driver heartbeat timer.  Skips work while interrupts are disabled
 * (intr_sem held), sends the firmware heartbeat, refreshes the FW RX
 * drop counter, works around a 5708 statistics-corruption erratum by
 * forcing a stats DMA, runs the chip-specific SerDes state machine,
 * and re-arms itself at bp->current_interval.
 */
5225static void
5226bnx2_timer(unsigned long data)
5227{
5228 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 5229
48b01e2d
MC
5230 if (!netif_running(bp->dev))
5231 return;
b6016b76 5232
48b01e2d
MC
5233 if (atomic_read(&bp->intr_sem) != 0)
5234 goto bnx2_restart_timer;
b6016b76 5235
df149d70 5236 bnx2_send_heart_beat(bp);
b6016b76 5237
48b01e2d 5238 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 5239
02537b06
MC
5240 /* workaround occasional corrupted counters */
5241 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5242 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5243 BNX2_HC_COMMAND_STATS_NOW);
5244
f8dd064e
MC
5245 if (bp->phy_flags & PHY_SERDES_FLAG) {
5246 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5247 bnx2_5706_serdes_timer(bp);
27a005b8 5248 else
f8dd064e 5249 bnx2_5708_serdes_timer(bp);
b6016b76
MC
5250 }
5251
5252bnx2_restart_timer:
cd339a0e 5253 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5254}
5255
8e6a72c4
MC
/* Request the device IRQ from irq_tbl[0].  MSI interrupts are never
 * shared, so IRQF_SHARED is only used in INTx mode.  Returns the
 * request_irq() result.
 */
5256static int
5257bnx2_request_irq(struct bnx2 *bp)
5258{
5259 struct net_device *dev = bp->dev;
6d866ffc
MC
5260 unsigned long flags;
5261 struct bnx2_irq *irq = &bp->irq_tbl[0];
5262 int rc;
8e6a72c4 5263
6d866ffc
MC
5264 if (bp->flags & USING_MSI_FLAG)
5265 flags = 0;
5266 else
5267 flags = IRQF_SHARED;
5268 rc = request_irq(irq->vector, irq->handler, flags, dev->name, dev);
8e6a72c4
MC
5269 return rc;
5270}
5271
/* Free the device IRQ and, when MSI was in use, disable MSI on the
 * PCI device and clear the MSI-related flags.
 */
5272static void
5273bnx2_free_irq(struct bnx2 *bp)
5274{
5275 struct net_device *dev = bp->dev;
5276
6d866ffc 5277 free_irq(bp->irq_tbl[0].vector, dev);
8e6a72c4 5278 if (bp->flags & USING_MSI_FLAG) {
8e6a72c4
MC
5279 pci_disable_msi(bp->pdev);
5280 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
6d866ffc
MC
5281 }
5282}
5283
/* Populate irq_tbl[0] with the interrupt vector and handler.  Defaults
 * to INTx (bnx2_interrupt); when the chip supports MSI and @dis_msi is
 * not set, try to enable MSI and select the MSI handler — the 5709
 * additionally supports one-shot MSI with its own handler.
 */
5284static void
5285bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5286{
5287 bp->irq_tbl[0].handler = bnx2_interrupt;
5288 strcpy(bp->irq_tbl[0].name, bp->dev->name);
5289
5290 if ((bp->flags & MSI_CAP_FLAG) && !dis_msi) {
5291 if (pci_enable_msi(bp->pdev) == 0) {
5292 bp->flags |= USING_MSI_FLAG;
5293 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5294 bp->flags |= ONE_SHOT_MSI_FLAG;
5295 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5296 } else
5297 bp->irq_tbl[0].handler = bnx2_msi;
5298 }
5299 }
5300
5301 bp->irq_tbl[0].vector = bp->pdev->irq;
5303
b6016b76
MC
/* ndo_open: bring the interface up.  Powers the chip to D0, allocates
 * ring/status memory, selects the interrupt mode, enables NAPI,
 * requests the IRQ, and initializes the NIC.  If MSI is in use, an
 * interrupt self-test is run; on failure the driver falls back to
 * INTx and re-initializes.  Each failure path unwinds exactly what was
 * set up before it.  Returns 0 or a negative errno.
 */
5304/* Called with rtnl_lock */
5305static int
5306bnx2_open(struct net_device *dev)
5307{
972ec0d4 5308 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5309 int rc;
5310
1b2f922f
MC
5311 netif_carrier_off(dev);
5312
829ca9a3 5313 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
5314 bnx2_disable_int(bp);
5315
5316 rc = bnx2_alloc_mem(bp);
5317 if (rc)
5318 return rc;
5319
6d866ffc 5320 bnx2_setup_int_mode(bp, disable_msi);
35efa7c1 5321 bnx2_napi_enable(bp);
8e6a72c4
MC
5322 rc = bnx2_request_irq(bp);
5323
b6016b76 5324 if (rc) {
35efa7c1 5325 bnx2_napi_disable(bp);
b6016b76
MC
5326 bnx2_free_mem(bp);
5327 return rc;
5328 }
5329
5330 rc = bnx2_init_nic(bp);
5331
5332 if (rc) {
35efa7c1 5333 bnx2_napi_disable(bp);
8e6a72c4 5334 bnx2_free_irq(bp);
b6016b76
MC
5335 bnx2_free_skbs(bp);
5336 bnx2_free_mem(bp);
5337 return rc;
5338 }
6aa20a22 5339
cd339a0e 5340 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5341
5342 atomic_set(&bp->intr_sem, 0);
5343
5344 bnx2_enable_int(bp);
5345
5346 if (bp->flags & USING_MSI_FLAG) {
5347 /* Test MSI to make sure it is working
5348 * If MSI test fails, go back to INTx mode
5349 */
5350 if (bnx2_test_intr(bp) != 0) {
5351 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5352 " using MSI, switching to INTx mode. Please"
5353 " report this failure to the PCI maintainer"
5354 " and include system chipset information.\n",
5355 bp->dev->name);
5356
5357 bnx2_disable_int(bp);
8e6a72c4 5358 bnx2_free_irq(bp);
b6016b76 5359
6d866ffc
MC
5360 bnx2_setup_int_mode(bp, 1);
5361
b6016b76
MC
5362 rc = bnx2_init_nic(bp);
5363
8e6a72c4
MC
5364 if (!rc)
5365 rc = bnx2_request_irq(bp);
5366
b6016b76 5367 if (rc) {
35efa7c1 5368 bnx2_napi_disable(bp);
b6016b76
MC
5369 bnx2_free_skbs(bp);
5370 bnx2_free_mem(bp);
5371 del_timer_sync(&bp->timer);
5372 return rc;
5373 }
5374 bnx2_enable_int(bp);
5375 }
5376 }
5377 if (bp->flags & USING_MSI_FLAG) {
5378 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5379 }
5380
5381 netif_start_queue(dev);
5382
5383 return 0;
5384}
5385
/* Workqueue handler scheduled by bnx2_tx_timeout(): quiesce the
 * interface, reinitialize the chip, and restart traffic.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	/* bnx2_close() busy-waits on this flag so it does not tear the
	 * device down while the reset is in flight.  NOTE(review): plain
	 * int flag, no barriers — presumably adequate here; confirm
	 * against the close path.
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	/* Hold off interrupt handling until bnx2_netif_start() re-arms it. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5403
/* netdev tx-timeout hook: defer the actual reset to process context. */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
5412
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the NIC, record the new VLAN group, then re-program the
	 * receive mode so VLAN handling matches the new configuration.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5428
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Queue one skb on the tx ring: build the VLAN/checksum/LSO flag word,
 * DMA-map the linear part and each page fragment into consecutive
 * descriptors, then ring the doorbell registers.  Returns NETDEV_TX_OK
 * or NETDEV_TX_BUSY when the ring is unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* The queue should have been stopped before the ring filled up. */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* LSO (TSO) path: encode TCP header offsets/options into
		 * the flag word so the chip can segment.
		 */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6: transport offset beyond the fixed header is
			 * spread across several flag/mss bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4: headers are rewritten below, so a cloned
			 * header block must be un-shared first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Prepare pseudo-header checksum for the hardware. */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map the linear portion and fill the first descriptor. */
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last descriptor of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when a max-fragment skb might no longer fit;
	 * re-wake immediately if completions freed enough room meanwhile.
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5567
/* Called with rtnl_lock.
 * Bring the interface down: wait out any in-flight reset task, stop
 * interrupts and NAPI, tell the bootcode why we are unloading (for WoL
 * arming), then free all resources and drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	/* Choose the firmware unload message matching the WoL policy. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5600
/* Read a statistics counter kept by the hardware as a hi/lo 32-bit
 * pair.  GET_NET_STATS64 combines both halves (64-bit longs only);
 * GET_NET_STATS32 returns just the low half.  The whole expansion is
 * parenthesized so the macro is safe inside any surrounding expression
 * (the previous form let a higher-precedence operator bind to the
 * shifted high half alone).
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)					\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5613
/* netdev get_stats hook: translate the chip's statistics block into
 * struct net_device_stats.  Returns cached zeros when the stats block
 * has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter is unreliable on 5706 and 5708 A0
	 * (hardware errata), so report zero there.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5689
5690/* All ethtool functions called with rtnl_lock */
5691
/* ethtool get_settings: report supported/advertised modes, port type,
 * autoneg state, and (when the link is up) current speed/duplex.
 * With a remote-PHY-capable board both serdes and copper are reported.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* Snapshot live link state under the PHY lock. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* Link down: speed/duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 5750
b6016b76
MC
/* ethtool set_settings: validate the requested port/autoneg/speed
 * combination against the PHY's capabilities, then commit the new
 * parameters and reprogram the PHY.  Returns 0 or -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Port can only be switched on remote-PHY-capable boards. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single speed selected: advertise everything
			 * the port type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		/* Copper cannot be forced to gigabit speeds. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5833
5834static void
5835bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5836{
972ec0d4 5837 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5838
5839 strcpy(info->driver, DRV_MODULE_NAME);
5840 strcpy(info->version, DRV_MODULE_VERSION);
5841 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 5842 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
5843}
5844
244ac4f4
MC
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5852
/* ethtool get_regs: dump readable register ranges into the caller's
 * BNX2_REGDUMP_LEN buffer.  reg_boundaries[] holds alternating
 * [start, end) byte offsets of readable windows; bytes between windows
 * stay zero.  Nothing is read when the device is down.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	memset(p, 0, BNX2_REGDUMP_LEN);

	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	/* NOTE(review): 'p' is a u32 * but 'offset' is a byte offset, so
	 * this advances 4x too far in general; harmless only because the
	 * first boundary is 0.  Later repositioning goes through orig_p.
	 */
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of this window: jump to the start of the next one. */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
5902
b6016b76
MC
5903static void
5904bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5905{
972ec0d4 5906 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5907
5908 if (bp->flags & NO_WOL_FLAG) {
5909 wol->supported = 0;
5910 wol->wolopts = 0;
5911 }
5912 else {
5913 wol->supported = WAKE_MAGIC;
5914 if (bp->wol)
5915 wol->wolopts = WAKE_MAGIC;
5916 else
5917 wol->wolopts = 0;
5918 }
5919 memset(&wol->sopass, 0, sizeof(wol->sopass));
5920}
5921
5922static int
5923bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5924{
972ec0d4 5925 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5926
5927 if (wol->wolopts & ~WAKE_MAGIC)
5928 return -EINVAL;
5929
5930 if (wol->wolopts & WAKE_MAGIC) {
5931 if (bp->flags & NO_WOL_FLAG)
5932 return -EINVAL;
5933
5934 bp->wol = 1;
5935 }
5936 else {
5937 bp->wol = 0;
5938 }
5939 return 0;
5940}
5941
/* ethtool nway_reset: restart autonegotiation.  For remote-PHY boards
 * this is delegated to the management firmware; for serdes the link is
 * first forced down (loopback) so the peer notices the renegotiation.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; state is re-read after. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the timer that watches serdes autoneg completion. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5984
5985static int
5986bnx2_get_eeprom_len(struct net_device *dev)
5987{
972ec0d4 5988 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5989
1122db71 5990 if (bp->flash_info == NULL)
b6016b76
MC
5991 return 0;
5992
1122db71 5993 return (int) bp->flash_size;
b6016b76
MC
5994}
5995
5996static int
5997bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5998 u8 *eebuf)
5999{
972ec0d4 6000 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6001 int rc;
6002
1064e944 6003 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6004
6005 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6006
6007 return rc;
6008}
6009
6010static int
6011bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6012 u8 *eebuf)
6013{
972ec0d4 6014 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6015 int rc;
6016
1064e944 6017 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6018
6019 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6020
6021 return rc;
6022}
6023
6024static int
6025bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6026{
972ec0d4 6027 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6028
6029 memset(coal, 0, sizeof(struct ethtool_coalesce));
6030
6031 coal->rx_coalesce_usecs = bp->rx_ticks;
6032 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6033 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6034 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6035
6036 coal->tx_coalesce_usecs = bp->tx_ticks;
6037 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6038 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6039 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6040
6041 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6042
6043 return 0;
6044}
6045
/* ethtool set_coalesce: clamp each parameter to the width of its
 * hardware field (tick values are 10-bit, frame counts 8-bit) and
 * restart the NIC so the new values take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* 5708 only supports the one-second statistics interval. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Apply the new values by reinitializing a running NIC. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
6094
6095static void
6096bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6097{
972ec0d4 6098 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6099
13daffa2 6100 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 6101 ering->rx_mini_max_pending = 0;
47bf4246 6102 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
6103
6104 ering->rx_pending = bp->rx_ring_size;
6105 ering->rx_mini_pending = 0;
47bf4246 6106 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
6107
6108 ering->tx_max_pending = MAX_TX_DESC_CNT;
6109 ering->tx_pending = bp->tx_ring_size;
6110}
6111
/* Resize the rx and tx rings.  A running interface is quiesced and its
 * buffers freed first, then memory is reallocated and the NIC brought
 * back up.  Returns 0 or a negative errno from bnx2_alloc_mem().
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
6136
5d5d0015
MC
6137static int
6138bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6139{
6140 struct bnx2 *bp = netdev_priv(dev);
6141 int rc;
6142
6143 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6144 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6145 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6146
6147 return -EINVAL;
6148 }
6149 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6150 return rc;
6151}
6152
b6016b76
MC
6153static void
6154bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6155{
972ec0d4 6156 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6157
6158 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6159 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6160 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6161}
6162
6163static int
6164bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6165{
972ec0d4 6166 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6167
6168 bp->req_flow_ctrl = 0;
6169 if (epause->rx_pause)
6170 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6171 if (epause->tx_pause)
6172 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6173
6174 if (epause->autoneg) {
6175 bp->autoneg |= AUTONEG_FLOW_CTRL;
6176 }
6177 else {
6178 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6179 }
6180
c770a65c 6181 spin_lock_bh(&bp->phy_lock);
b6016b76 6182
0d8a6571 6183 bnx2_setup_phy(bp, bp->phy_port);
b6016b76 6184
c770a65c 6185 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6186
6187 return 0;
6188}
6189
/* ethtool get_rx_csum hook: report whether rx checksum offload is on. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6197
/* ethtool set_rx_csum hook: the flag is consulted on the rx path. */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6206
b11d6213
MC
6207static int
6208bnx2_set_tso(struct net_device *dev, u32 data)
6209{
4666f87a
MC
6210 struct bnx2 *bp = netdev_priv(dev);
6211
6212 if (data) {
b11d6213 6213 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
6214 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6215 dev->features |= NETIF_F_TSO6;
6216 } else
6217 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6218 NETIF_F_TSO_ECN);
b11d6213
MC
6219 return 0;
6220}
6221
/* Number of counters reported through ethtool -S; the three tables
 * below (names, statistics-block offsets, and per-chip field widths)
 * must all have exactly this many entries, in the same order.
 */
#define BNX2_NUM_STATS 46

/* ethtool -S counter names, index-aligned with bnx2_stats_offset_arr. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};

/* 32-bit-word offset of a field within the statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Where each named counter lives inside struct statistics_block. */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};

/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 * Per-counter field width in bytes (8 = 64-bit hi/lo pair, 4 = 32-bit,
 * 0 = counter not reported on this chip).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6344
b6016b76
MC
/* Number of self-tests reported through ethtool -t; must match the
 * entries written by bnx2_self_test() into its result buffer.
 */
#define BNX2_NUM_TESTS 6

/* Test names, index-aligned with the buf[] slots in bnx2_self_test(). */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6357
6358static int
b9f2c044 6359bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 6360{
b9f2c044
JG
6361 switch (sset) {
6362 case ETH_SS_TEST:
6363 return BNX2_NUM_TESTS;
6364 case ETH_SS_STATS:
6365 return BNX2_NUM_STATS;
6366 default:
6367 return -EOPNOTSUPP;
6368 }
b6016b76
MC
6369}
6370
/* ethtool self-test: runs the offline tests (register, memory,
 * loopback — which require taking the NIC down) when requested, then
 * the online tests (nvram, interrupt, link).  buf[] slots correspond
 * one-to-one with bnx2_tests_str_arr; nonzero means failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or just reset if down). */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6426
6427static void
6428bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6429{
6430 switch (stringset) {
6431 case ETH_SS_STATS:
6432 memcpy(buf, bnx2_stats_str_arr,
6433 sizeof(bnx2_stats_str_arr));
6434 break;
6435 case ETH_SS_TEST:
6436 memcpy(buf, bnx2_tests_str_arr,
6437 sizeof(bnx2_tests_str_arr));
6438 break;
6439 }
6440}
6441
b6016b76
MC
6442static void
6443bnx2_get_ethtool_stats(struct net_device *dev,
6444 struct ethtool_stats *stats, u64 *buf)
6445{
972ec0d4 6446 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6447 int i;
6448 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 6449 u8 *stats_len_arr = NULL;
b6016b76
MC
6450
6451 if (hw_stats == NULL) {
6452 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6453 return;
6454 }
6455
5b0c76ad
MC
6456 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6457 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6458 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6459 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 6460 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
6461 else
6462 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
6463
6464 for (i = 0; i < BNX2_NUM_STATS; i++) {
6465 if (stats_len_arr[i] == 0) {
6466 /* skip this counter */
6467 buf[i] = 0;
6468 continue;
6469 }
6470 if (stats_len_arr[i] == 4) {
6471 /* 4-byte counter */
6472 buf[i] = (u64)
6473 *(hw_stats + bnx2_stats_offset_arr[i]);
6474 continue;
6475 }
6476 /* 8-byte counter */
6477 buf[i] = (((u64) *(hw_stats +
6478 bnx2_stats_offset_arr[i])) << 32) +
6479 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6480 }
6481}
6482
6483static int
6484bnx2_phys_id(struct net_device *dev, u32 data)
6485{
972ec0d4 6486 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6487 int i;
6488 u32 save;
6489
6490 if (data == 0)
6491 data = 2;
6492
6493 save = REG_RD(bp, BNX2_MISC_CFG);
6494 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6495
6496 for (i = 0; i < (data * 2); i++) {
6497 if ((i % 2) == 0) {
6498 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6499 }
6500 else {
6501 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6502 BNX2_EMAC_LED_1000MB_OVERRIDE |
6503 BNX2_EMAC_LED_100MB_OVERRIDE |
6504 BNX2_EMAC_LED_10MB_OVERRIDE |
6505 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6506 BNX2_EMAC_LED_TRAFFIC);
6507 }
6508 msleep_interruptible(500);
6509 if (signal_pending(current))
6510 break;
6511 }
6512 REG_WR(bp, BNX2_EMAC_LED, 0);
6513 REG_WR(bp, BNX2_MISC_CFG, save);
6514 return 0;
6515}
6516
4666f87a
MC
6517static int
6518bnx2_set_tx_csum(struct net_device *dev, u32 data)
6519{
6520 struct bnx2 *bp = netdev_priv(dev);
6521
6522 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 6523 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
6524 else
6525 return (ethtool_op_set_tx_csum(dev, data));
6526}
6527
/* ethtool entry points for the driver.  bnx2-specific handlers are
 * defined above; generic helpers (get_link, set_sg) come from the
 * ethtool core.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
6558
/* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).
 * PHY register access is refused when the PHY is managed remotely by
 * the management firmware, and requires the interface to be up because
 * the MDIO bus is only usable while the chip is initialized.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes MDIO access with the link poller. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
6612
6613/* Called with rtnl_lock */
6614static int
6615bnx2_change_mac_addr(struct net_device *dev, void *p)
6616{
6617 struct sockaddr *addr = p;
972ec0d4 6618 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6619
73eef4cd
MC
6620 if (!is_valid_ether_addr(addr->sa_data))
6621 return -EINVAL;
6622
b6016b76
MC
6623 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6624 if (netif_running(dev))
6625 bnx2_set_mac_addr(bp);
6626
6627 return 0;
6628}
6629
6630/* Called with rtnl_lock */
6631static int
6632bnx2_change_mtu(struct net_device *dev, int new_mtu)
6633{
972ec0d4 6634 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6635
6636 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6637 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6638 return -EINVAL;
6639
6640 dev->mtu = new_mtu;
5d5d0015 6641 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
6642}
6643
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler by hand with the device IRQ
 * masked, so netconsole and friends can make progress without relying
 * on interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6655
253c8b75
MC
6656static void __devinit
6657bnx2_get_5709_media(struct bnx2 *bp)
6658{
6659 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6660 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6661 u32 strap;
6662
6663 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6664 return;
6665 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6666 bp->phy_flags |= PHY_SERDES_FLAG;
6667 return;
6668 }
6669
6670 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6671 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6672 else
6673 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6674
6675 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6676 switch (strap) {
6677 case 0x4:
6678 case 0x5:
6679 case 0x6:
6680 bp->phy_flags |= PHY_SERDES_FLAG;
6681 return;
6682 }
6683 } else {
6684 switch (strap) {
6685 case 0x1:
6686 case 0x2:
6687 case 0x4:
6688 bp->phy_flags |= PHY_SERDES_FLAG;
6689 return;
6690 }
6691 }
6692}
6693
/* Probe-time bus detection for non-PCIE chips: decode the PCI/PCI-X
 * mode, bus clock and bus width from the chip's status registers into
 * bp->flags and bp->bus_speed_mhz (used for the probe banner and for
 * PCI-X specific workarounds).
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		/* In PCI-X mode the detected clock is encoded in the
		 * clock-control status bits.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: only 33 vs 66 MHz (M66EN pin). */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;

}
6745
b6016b76
MC
6746static int __devinit
6747bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6748{
6749 struct bnx2 *bp;
6750 unsigned long mem_len;
58fc2ea4 6751 int rc, i, j;
b6016b76 6752 u32 reg;
40453c83 6753 u64 dma_mask, persist_dma_mask;
b6016b76 6754
b6016b76 6755 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6756 bp = netdev_priv(dev);
b6016b76
MC
6757
6758 bp->flags = 0;
6759 bp->phy_flags = 0;
6760
6761 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6762 rc = pci_enable_device(pdev);
6763 if (rc) {
898eb71c 6764 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
6765 goto err_out;
6766 }
6767
6768 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6769 dev_err(&pdev->dev,
2e8a538d 6770 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6771 rc = -ENODEV;
6772 goto err_out_disable;
6773 }
6774
6775 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6776 if (rc) {
9b91cf9d 6777 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6778 goto err_out_disable;
6779 }
6780
6781 pci_set_master(pdev);
6782
6783 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6784 if (bp->pm_cap == 0) {
9b91cf9d 6785 dev_err(&pdev->dev,
2e8a538d 6786 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6787 rc = -EIO;
6788 goto err_out_release;
6789 }
6790
b6016b76
MC
6791 bp->dev = dev;
6792 bp->pdev = pdev;
6793
6794 spin_lock_init(&bp->phy_lock);
1b8227c4 6795 spin_lock_init(&bp->indirect_lock);
c4028958 6796 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6797
6798 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6799 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6800 dev->mem_end = dev->mem_start + mem_len;
6801 dev->irq = pdev->irq;
6802
6803 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6804
6805 if (!bp->regview) {
9b91cf9d 6806 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6807 rc = -ENOMEM;
6808 goto err_out_release;
6809 }
6810
6811 /* Configure byte swap and enable write to the reg_window registers.
6812 * Rely on CPU to do target byte swapping on big endian systems
6813 * The chip's target access swapping will not swap all accesses
6814 */
6815 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6816 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6817 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6818
829ca9a3 6819 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6820
6821 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6822
883e5151
MC
6823 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6824 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6825 dev_err(&pdev->dev,
6826 "Cannot find PCIE capability, aborting.\n");
6827 rc = -EIO;
6828 goto err_out_unmap;
6829 }
6830 bp->flags |= PCIE_FLAG;
6831 } else {
59b47d8a
MC
6832 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6833 if (bp->pcix_cap == 0) {
6834 dev_err(&pdev->dev,
6835 "Cannot find PCIX capability, aborting.\n");
6836 rc = -EIO;
6837 goto err_out_unmap;
6838 }
6839 }
6840
8e6a72c4
MC
6841 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6842 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6843 bp->flags |= MSI_CAP_FLAG;
6844 }
6845
40453c83
MC
6846 /* 5708 cannot support DMA addresses > 40-bit. */
6847 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6848 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6849 else
6850 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6851
6852 /* Configure DMA attributes. */
6853 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6854 dev->features |= NETIF_F_HIGHDMA;
6855 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6856 if (rc) {
6857 dev_err(&pdev->dev,
6858 "pci_set_consistent_dma_mask failed, aborting.\n");
6859 goto err_out_unmap;
6860 }
6861 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6862 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6863 goto err_out_unmap;
6864 }
6865
883e5151
MC
6866 if (!(bp->flags & PCIE_FLAG))
6867 bnx2_get_pci_speed(bp);
b6016b76
MC
6868
6869 /* 5706A0 may falsely detect SERR and PERR. */
6870 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6871 reg = REG_RD(bp, PCI_COMMAND);
6872 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6873 REG_WR(bp, PCI_COMMAND, reg);
6874 }
6875 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6876 !(bp->flags & PCIX_FLAG)) {
6877
9b91cf9d 6878 dev_err(&pdev->dev,
2e8a538d 6879 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6880 goto err_out_unmap;
6881 }
6882
6883 bnx2_init_nvram(bp);
6884
e3648b3d
MC
6885 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6886
6887 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6888 BNX2_SHM_HDR_SIGNATURE_SIG) {
6889 u32 off = PCI_FUNC(pdev->devfn) << 2;
6890
6891 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6892 } else
e3648b3d
MC
6893 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6894
b6016b76
MC
6895 /* Get the permanent MAC address. First we need to make sure the
6896 * firmware is actually running.
6897 */
e3648b3d 6898 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6899
6900 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6901 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6902 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6903 rc = -ENODEV;
6904 goto err_out_unmap;
6905 }
6906
58fc2ea4
MC
6907 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6908 for (i = 0, j = 0; i < 3; i++) {
6909 u8 num, k, skip0;
6910
6911 num = (u8) (reg >> (24 - (i * 8)));
6912 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6913 if (num >= k || !skip0 || k == 1) {
6914 bp->fw_version[j++] = (num / k) + '0';
6915 skip0 = 0;
6916 }
6917 }
6918 if (i != 2)
6919 bp->fw_version[j++] = '.';
6920 }
846f5c62
MC
6921 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6922 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6923 bp->wol = 1;
6924
6925 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
c2d3db8c
MC
6926 bp->flags |= ASF_ENABLE_FLAG;
6927
6928 for (i = 0; i < 30; i++) {
6929 reg = REG_RD_IND(bp, bp->shmem_base +
6930 BNX2_BC_STATE_CONDITION);
6931 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6932 break;
6933 msleep(10);
6934 }
6935 }
58fc2ea4
MC
6936 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6937 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6938 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6939 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6940 int i;
6941 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6942
6943 bp->fw_version[j++] = ' ';
6944 for (i = 0; i < 3; i++) {
6945 reg = REG_RD_IND(bp, addr + i * 4);
6946 reg = swab32(reg);
6947 memcpy(&bp->fw_version[j], &reg, 4);
6948 j += 4;
6949 }
6950 }
b6016b76 6951
e3648b3d 6952 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6953 bp->mac_addr[0] = (u8) (reg >> 8);
6954 bp->mac_addr[1] = (u8) reg;
6955
e3648b3d 6956 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6957 bp->mac_addr[2] = (u8) (reg >> 24);
6958 bp->mac_addr[3] = (u8) (reg >> 16);
6959 bp->mac_addr[4] = (u8) (reg >> 8);
6960 bp->mac_addr[5] = (u8) reg;
6961
5d5d0015
MC
6962 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6963
b6016b76 6964 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6965 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6966
6967 bp->rx_csum = 1;
6968
b6016b76
MC
6969 bp->tx_quick_cons_trip_int = 20;
6970 bp->tx_quick_cons_trip = 20;
6971 bp->tx_ticks_int = 80;
6972 bp->tx_ticks = 80;
6aa20a22 6973
b6016b76
MC
6974 bp->rx_quick_cons_trip_int = 6;
6975 bp->rx_quick_cons_trip = 6;
6976 bp->rx_ticks_int = 18;
6977 bp->rx_ticks = 18;
6978
7ea6920e 6979 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6980
6981 bp->timer_interval = HZ;
cd339a0e 6982 bp->current_interval = HZ;
b6016b76 6983
5b0c76ad
MC
6984 bp->phy_addr = 1;
6985
b6016b76 6986 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6987 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6988 bnx2_get_5709_media(bp);
6989 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6990 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6991
0d8a6571 6992 bp->phy_port = PORT_TP;
bac0dff6 6993 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6994 bp->phy_port = PORT_FIBRE;
846f5c62
MC
6995 reg = REG_RD_IND(bp, bp->shmem_base +
6996 BNX2_SHARED_HW_CFG_CONFIG);
6997 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6998 bp->flags |= NO_WOL_FLAG;
6999 bp->wol = 0;
7000 }
bac0dff6 7001 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 7002 bp->phy_addr = 2;
5b0c76ad
MC
7003 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7004 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
7005 }
0d8a6571
MC
7006 bnx2_init_remote_phy(bp);
7007
261dd5ca
MC
7008 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7009 CHIP_NUM(bp) == CHIP_NUM_5708)
7010 bp->phy_flags |= PHY_CRC_FIX_FLAG;
fb0c18bd
MC
7011 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7012 (CHIP_REV(bp) == CHIP_REV_Ax ||
7013 CHIP_REV(bp) == CHIP_REV_Bx))
b659f44e 7014 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 7015
16088272
MC
7016 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7017 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
846f5c62 7018 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
dda1e390 7019 bp->flags |= NO_WOL_FLAG;
846f5c62
MC
7020 bp->wol = 0;
7021 }
dda1e390 7022
b6016b76
MC
7023 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7024 bp->tx_quick_cons_trip_int =
7025 bp->tx_quick_cons_trip;
7026 bp->tx_ticks_int = bp->tx_ticks;
7027 bp->rx_quick_cons_trip_int =
7028 bp->rx_quick_cons_trip;
7029 bp->rx_ticks_int = bp->rx_ticks;
7030 bp->comp_prod_trip_int = bp->comp_prod_trip;
7031 bp->com_ticks_int = bp->com_ticks;
7032 bp->cmd_ticks_int = bp->cmd_ticks;
7033 }
7034
f9317a40
MC
7035 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7036 *
7037 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7038 * with byte enables disabled on the unused 32-bit word. This is legal
7039 * but causes problems on the AMD 8132 which will eventually stop
7040 * responding after a while.
7041 *
7042 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 7043 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
7044 */
7045 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7046 struct pci_dev *amd_8132 = NULL;
7047
7048 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7049 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7050 amd_8132))) {
f9317a40 7051
44c10138
AK
7052 if (amd_8132->revision >= 0x10 &&
7053 amd_8132->revision <= 0x13) {
f9317a40
MC
7054 disable_msi = 1;
7055 pci_dev_put(amd_8132);
7056 break;
7057 }
7058 }
7059 }
7060
deaf391b 7061 bnx2_set_default_link(bp);
b6016b76
MC
7062 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7063
cd339a0e
MC
7064 init_timer(&bp->timer);
7065 bp->timer.expires = RUN_AT(bp->timer_interval);
7066 bp->timer.data = (unsigned long) bp;
7067 bp->timer.function = bnx2_timer;
7068
b6016b76
MC
7069 return 0;
7070
7071err_out_unmap:
7072 if (bp->regview) {
7073 iounmap(bp->regview);
73eef4cd 7074 bp->regview = NULL;
b6016b76
MC
7075 }
7076
7077err_out_release:
7078 pci_release_regions(pdev);
7079
7080err_out_disable:
7081 pci_disable_device(pdev);
7082 pci_set_drvdata(pdev, NULL);
7083
7084err_out:
7085 return rc;
7086}
7087
883e5151
MC
7088static char * __devinit
7089bnx2_bus_string(struct bnx2 *bp, char *str)
7090{
7091 char *s = str;
7092
7093 if (bp->flags & PCIE_FLAG) {
7094 s += sprintf(s, "PCI Express");
7095 } else {
7096 s += sprintf(s, "PCI");
7097 if (bp->flags & PCIX_FLAG)
7098 s += sprintf(s, "-X");
7099 if (bp->flags & PCI_32BIT_FLAG)
7100 s += sprintf(s, " 32-bit");
7101 else
7102 s += sprintf(s, " 64-bit");
7103 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7104 }
7105 return str;
7106}
7107
35efa7c1
MC
7108static int __devinit
7109bnx2_init_napi(struct bnx2 *bp)
7110{
7111 struct bnx2_napi *bnapi = &bp->bnx2_napi;
7112
7113 bnapi->bp = bp;
7114 netif_napi_add(bp->dev, &bnapi->napi, bnx2_poll, 64);
7115}
7116
b6016b76
MC
7117static int __devinit
7118bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7119{
7120 static int version_printed = 0;
7121 struct net_device *dev = NULL;
7122 struct bnx2 *bp;
0795af57 7123 int rc;
883e5151 7124 char str[40];
0795af57 7125 DECLARE_MAC_BUF(mac);
b6016b76
MC
7126
7127 if (version_printed++ == 0)
7128 printk(KERN_INFO "%s", version);
7129
7130 /* dev zeroed in init_etherdev */
7131 dev = alloc_etherdev(sizeof(*bp));
7132
7133 if (!dev)
7134 return -ENOMEM;
7135
7136 rc = bnx2_init_board(pdev, dev);
7137 if (rc < 0) {
7138 free_netdev(dev);
7139 return rc;
7140 }
7141
7142 dev->open = bnx2_open;
7143 dev->hard_start_xmit = bnx2_start_xmit;
7144 dev->stop = bnx2_close;
7145 dev->get_stats = bnx2_get_stats;
7146 dev->set_multicast_list = bnx2_set_rx_mode;
7147 dev->do_ioctl = bnx2_ioctl;
7148 dev->set_mac_address = bnx2_change_mac_addr;
7149 dev->change_mtu = bnx2_change_mtu;
7150 dev->tx_timeout = bnx2_tx_timeout;
7151 dev->watchdog_timeo = TX_TIMEOUT;
7152#ifdef BCM_VLAN
7153 dev->vlan_rx_register = bnx2_vlan_rx_register;
b6016b76 7154#endif
b6016b76 7155 dev->ethtool_ops = &bnx2_ethtool_ops;
b6016b76 7156
972ec0d4 7157 bp = netdev_priv(dev);
35efa7c1 7158 bnx2_init_napi(bp);
b6016b76
MC
7159
7160#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7161 dev->poll_controller = poll_bnx2;
7162#endif
7163
1b2f922f
MC
7164 pci_set_drvdata(pdev, dev);
7165
7166 memcpy(dev->dev_addr, bp->mac_addr, 6);
7167 memcpy(dev->perm_addr, bp->mac_addr, 6);
7168 bp->name = board_info[ent->driver_data].name;
7169
d212f87b 7170 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
4666f87a 7171 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d212f87b
SH
7172 dev->features |= NETIF_F_IPV6_CSUM;
7173
1b2f922f
MC
7174#ifdef BCM_VLAN
7175 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7176#endif
7177 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
7178 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7179 dev->features |= NETIF_F_TSO6;
1b2f922f 7180
b6016b76 7181 if ((rc = register_netdev(dev))) {
9b91cf9d 7182 dev_err(&pdev->dev, "Cannot register net device\n");
b6016b76
MC
7183 if (bp->regview)
7184 iounmap(bp->regview);
7185 pci_release_regions(pdev);
7186 pci_disable_device(pdev);
7187 pci_set_drvdata(pdev, NULL);
7188 free_netdev(dev);
7189 return rc;
7190 }
7191
883e5151 7192 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
0795af57 7193 "IRQ %d, node addr %s\n",
b6016b76
MC
7194 dev->name,
7195 bp->name,
7196 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7197 ((CHIP_ID(bp) & 0x0ff0) >> 4),
883e5151 7198 bnx2_bus_string(bp, str),
b6016b76 7199 dev->base_addr,
0795af57 7200 bp->pdev->irq, print_mac(mac, dev->dev_addr));
b6016b76 7201
b6016b76
MC
7202 return 0;
7203}
7204
7205static void __devexit
7206bnx2_remove_one(struct pci_dev *pdev)
7207{
7208 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7209 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7210
afdc08b9
MC
7211 flush_scheduled_work();
7212
b6016b76
MC
7213 unregister_netdev(dev);
7214
7215 if (bp->regview)
7216 iounmap(bp->regview);
7217
7218 free_netdev(dev);
7219 pci_release_regions(pdev);
7220 pci_disable_device(pdev);
7221 pci_set_drvdata(pdev, NULL);
7222}
7223
/* PCI suspend entry point: save PCI state, quiesce the interface if it
 * is running, tell the bootcode whether to keep the link alive for
 * Wake-on-LAN, and drop into the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload message matching the WOL capability. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7254
/* PCI resume entry point: restore PCI state and, if the interface was
 * running at suspend time, bring the chip back to D0 and fully
 * re-initialize it.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
7271
/* PCI driver glue: binds the probe/remove and power-management entry
 * points above to the device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
7280
/* Module init: register the PCI driver; probing happens per device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7285
/* Module exit: unregister the driver, removing all bound devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
7290
7291module_init(bnx2_init);
7292module_exit(bnx2_cleanup);
7293
7294
7295
This page took 1.165402 seconds and 5 git commands to generate.