[BNX2]: Enhance the heartbeat.
[deliverable/linux.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052
MC
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76
MC
54
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
b91b9fd1
MC
57#define DRV_MODULE_VERSION "1.5.11"
58#define DRV_MODULE_RELDATE "June 4, 2007"
b6016b76
MC
59
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
e19360f2 65static const char version[] __devinitdata =
b6016b76
MC
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 69MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
70MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
5b0c76ad
MC
84 BCM5708,
85 BCM5708S,
bac0dff6 86 BCM5709,
27a005b8 87 BCM5709S,
b6016b76
MC
88} board_t;
89
90/* indexed by board_t, above */
f71e1309 91static const struct {
b6016b76
MC
92 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
b6016b76
MC
103 };
104
105static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
b6016b76
MC
124 { 0, }
125};
126
127static struct flash_spec flash_table[] =
128{
129 /* Slow EEPROM */
37137709 130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
b6016b76
MC
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 "EEPROM - slow"},
37137709
MC
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76 136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 0001"},
b6016b76
MC
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
37137709 141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
37137709 147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
37137709
MC
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 "Entry 0100"},
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
37137709
MC
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
172 /* Fast EEPROM */
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 "EEPROM - fast"},
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 "Entry 1001"},
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 "Entry 1010"},
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 "Entry 1100"},
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1101"},
202 /* Ateml Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
b6016b76
MC
212};
213
214MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
e89bbf10
MC
216static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217{
2f8af120 218 u32 diff;
e89bbf10 219
2f8af120 220 smp_mb();
faac9c4b
MC
221
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
224 */
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
e89bbf10
MC
231 return (bp->tx_ring_size - diff);
232}
233
b6016b76
MC
234static u32
235bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236{
1b8227c4
MC
237 u32 val;
238
239 spin_lock_bh(&bp->indirect_lock);
b6016b76 240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
243 return val;
b6016b76
MC
244}
245
246static void
247bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248{
1b8227c4 249 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 252 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
253}
254
255static void
256bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257{
258 offset += cid_addr;
1b8227c4 259 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
1b8227c4 277 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
278}
279
/* Read PHY register "reg" over MDIO into *val.
 *
 * Returns 0 on success, -EBUSY if the transaction does not complete
 * (in which case *val is set to 0).  If hardware autopolling of the
 * PHY is enabled, it is temporarily disabled around the manual
 * transaction and restored afterwards.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Turn off autopolling; the read-back flushes the
		 * posted write before the settle delay.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the MDIO read: PHY address, register, and start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to pick up the data field. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore autopolling disabled above. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
336
/* Write "val" to PHY register "reg" over MDIO.
 *
 * Returns 0 on success, -EBUSY if the transaction does not complete.
 * As in bnx2_read_phy(), autopolling is suspended around the manual
 * transaction and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Disable autopolling; read back to flush, then settle. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the MDIO write with the data in the low bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore autopolling disabled above. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
385
/* Mask the chip's interrupt.  The read back of the same register
 * flushes the posted write so the mask takes effect before return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
393
/* Re-enable the chip's interrupt.
 *
 * The first write acknowledges up to last_status_idx while still
 * keeping the interrupt masked; the second write (without MASK_INT)
 * unmasks it.  The final HC_COMMAND write with COAL_NOW forces the
 * host coalescing block to generate an interrupt if status has
 * changed, so no event is lost across the masked window.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
406
407static void
408bnx2_disable_int_sync(struct bnx2 *bp)
409{
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
413}
414
415static void
416bnx2_netif_stop(struct bnx2 *bp)
417{
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423 }
424}
425
426static void
427bnx2_netif_start(struct bnx2 *bp)
428{
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436}
437
/* Free everything allocated by bnx2_alloc_mem().  Safe to call on a
 * partially allocated state (it is the error path of the allocator):
 * every pointer is checked and NULLed after freeing.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709 context memory pages (ctx_pages is 0 on other chips). */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and statistics blocks share one DMA allocation. */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	/* kfree/vfree accept NULL, so no guards needed here. */
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
476
/* Allocate all rings, the status/statistics block and (on 5709) the
 * context memory pages.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated
 * so far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	/* Software-side tx ring bookkeeping (not DMA'd). */
	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx bookkeeping can be large (rx_max_ring pages) -> vmalloc. */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* The stats block lives right after the cache-aligned status
	 * block inside the same DMA buffer.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB of context memory, split into BCM_PAGE_SIZE pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
547
e3648b3d
MC
/* Encode the current link state into the BNX2_LINK_STATUS format and
 * post it to firmware shared memory.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	/* With a remote PHY, link state is owned elsewhere; skip. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Map speed/duplex onto the firmware's encoding. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* Read twice: the MII BMSR latches link-down, so
			 * the second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
606
9b1084b8
MC
607static char *
608bnx2_xceiver_str(struct bnx2 *bp)
609{
610 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
611 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
612 "Copper"));
613}
614
b6016b76
MC
/* Log the link state change, update the carrier, and mirror the new
 * state to firmware via bnx2_report_fw_link().
 *
 * Note: the message is assembled from several continuation printk()
 * calls, so the strings must not be altered independently.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
651
/* Resolve the pause (flow control) settings into bp->flow_ctrl.
 *
 * If flow control is not fully autonegotiated, the requested setting
 * is applied directly (full duplex only).  Otherwise the local and
 * remote advertisements are combined per the 802.3 pause resolution
 * rules; 5708 SerDes reports the resolved result directly in a
 * status register instead.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful at full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes exposes the resolved pause state. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Translate 1000Base-X pause bits into the copper
		 * encoding so one resolution path handles both.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
727
27a005b8
MC
/* Fill in line_speed/duplex for a 5709 SerDes PHY that has link.
 * Reads the GP_STATUS block (selected via the block-address register)
 * then restores the COMBO_IEEEB0 block.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested settings as-is. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
766
b6016b76 767static int
5b0c76ad
MC
768bnx2_5708s_linkup(struct bnx2 *bp)
769{
770 u32 val;
771
772 bp->link_up = 1;
773 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
774 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
775 case BCM5708S_1000X_STAT1_SPEED_10:
776 bp->line_speed = SPEED_10;
777 break;
778 case BCM5708S_1000X_STAT1_SPEED_100:
779 bp->line_speed = SPEED_100;
780 break;
781 case BCM5708S_1000X_STAT1_SPEED_1G:
782 bp->line_speed = SPEED_1000;
783 break;
784 case BCM5708S_1000X_STAT1_SPEED_2G5:
785 bp->line_speed = SPEED_2500;
786 break;
787 }
788 if (val & BCM5708S_1000X_STAT1_FD)
789 bp->duplex = DUPLEX_FULL;
790 else
791 bp->duplex = DUPLEX_HALF;
792
793 return 0;
794}
795
/* Fill in line_speed/duplex for a 5706 SerDes PHY that has link.
 * SerDes on this chip is always 1000 Mbps; only duplex is resolved,
 * from BMCR when forced or from the advertisement registers when
 * autonegotiated.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR already gave us the answer. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Duplex is the highest common 1000Base-X capability. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
832
/* Fill in line_speed/duplex for a copper PHY that has link.
 *
 * With autoneg enabled, resolve 1000 Mbps first from the
 * CTRL1000/STAT1000 pair (partner bits are shifted by 2 to line up
 * with our advertisement bits), then fall back to 10/100 resolution
 * from ADV/LPA.  With autoneg disabled, decode the forced BMCR bits.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Align partner 1000BT bits with local advertisement. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			/* Highest common 10/100 denominator. */
			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
898
/* Program the EMAC for the currently resolved link parameters:
 * port mode (speed), duplex, and rx/tx pause enables.  Finishes by
 * acknowledging the EMAC link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* TX_LENGTHS values 0x2620/0x26ff: presumably slot-time/IPG
	 * tuning, with the larger value for 1000HD — from the
	 * original driver; exact field meaning not visible here.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
965
27a005b8
MC
966static void
967bnx2_enable_bmsr1(struct bnx2 *bp)
968{
969 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
970 (CHIP_NUM(bp) == CHIP_NUM_5709))
971 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
972 MII_BNX2_BLK_ADDR_GP_STATUS);
973}
974
975static void
976bnx2_disable_bmsr1(struct bnx2 *bp)
977{
978 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
979 (CHIP_NUM(bp) == CHIP_NUM_5709))
980 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
981 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
982}
983
605a9e20
MC
/* Ensure 2.5G advertisement is enabled on a 2.5G-capable PHY.
 *
 * Returns 1 if 2.5G was already enabled (nothing changed), 0 if it
 * had to be turned on just now, and 0 immediately if the PHY is not
 * 2.5G capable.  On 5709 the UP1 register is reachable only via the
 * OVER1G block, which is selected and then restored.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1012
/* Ensure 2.5G advertisement is disabled on a 2.5G-capable PHY.
 *
 * Returns 1 if 2.5G was enabled and has just been turned off, 0 if
 * it was already off or the PHY is not 2.5G capable.  Block-address
 * handling mirrors bnx2_test_and_enable_2g5().
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1038
1039static void
1040bnx2_enable_forced_2g5(struct bnx2 *bp)
1041{
1042 u32 bmcr;
1043
1044 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1045 return;
1046
27a005b8
MC
1047 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1048 u32 val;
1049
1050 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1051 MII_BNX2_BLK_ADDR_SERDES_DIG);
1052 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1053 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1054 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1055 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1056
1057 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1058 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1059 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1060
1061 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1062 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1063 bmcr |= BCM5708S_BMCR_FORCE_2500;
1064 }
1065
1066 if (bp->autoneg & AUTONEG_SPEED) {
1067 bmcr &= ~BMCR_ANENABLE;
1068 if (bp->req_duplex == DUPLEX_FULL)
1069 bmcr |= BMCR_FULLDPLX;
1070 }
1071 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1072}
1073
1074static void
1075bnx2_disable_forced_2g5(struct bnx2 *bp)
1076{
1077 u32 bmcr;
1078
1079 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1080 return;
1081
27a005b8
MC
1082 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1083 u32 val;
1084
1085 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1086 MII_BNX2_BLK_ADDR_SERDES_DIG);
1087 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1088 val &= ~MII_BNX2_SD_MISC1_FORCE;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1098 }
1099
1100 if (bp->autoneg & AUTONEG_SPEED)
1101 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1102 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1103}
1104
b6016b76
MC
/* Re-evaluate link state and reprogram the MAC accordingly.
 *
 * Reads link status from the PHY (with chip-specific handling),
 * dispatches to the per-PHY linkup helper, resolves flow control,
 * logs any state change and finally calls bnx2_set_mac_link().
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback, pretend the link is up and skip the PHY. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote PHY: link management is handled elsewhere. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: link-down is latched, so the second read
	 * gives the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		/* 5706 SerDes: trust the EMAC link indication over
		 * the PHY's BMSR.
		 */
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link lost: drop any forced 2.5G so autoneg can
		 * find a partner again.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1170
1171static int
1172bnx2_reset_phy(struct bnx2 *bp)
1173{
1174 int i;
1175 u32 reg;
1176
ca58c3af 1177 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1178
1179#define PHY_RESET_MAX_WAIT 100
1180 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1181 udelay(10);
1182
ca58c3af 1183 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1184 if (!(reg & BMCR_RESET)) {
1185 udelay(20);
1186 break;
1187 }
1188 }
1189 if (i == PHY_RESET_MAX_WAIT) {
1190 return -EBUSY;
1191 }
1192 return 0;
1193}
1194
1195static u32
1196bnx2_phy_get_pause_adv(struct bnx2 *bp)
1197{
1198 u32 adv = 0;
1199
1200 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1201 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1202
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE;
1205 }
1206 else {
1207 adv = ADVERTISE_PAUSE_CAP;
1208 }
1209 }
1210 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1211 if (bp->phy_flags & PHY_SERDES_FLAG) {
1212 adv = ADVERTISE_1000XPSE_ASYM;
1213 }
1214 else {
1215 adv = ADVERTISE_PAUSE_ASYM;
1216 }
1217 }
1218 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1219 if (bp->phy_flags & PHY_SERDES_FLAG) {
1220 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1221 }
1222 else {
1223 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1224 }
1225 }
1226 return adv;
1227}
1228
0d8a6571
MC
1229static int bnx2_fw_sync(struct bnx2 *, u32, int);
1230
b6016b76 1231static int
0d8a6571
MC
1232bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1233{
1234 u32 speed_arg = 0, pause_adv;
1235
1236 pause_adv = bnx2_phy_get_pause_adv(bp);
1237
1238 if (bp->autoneg & AUTONEG_SPEED) {
1239 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1240 if (bp->advertising & ADVERTISED_10baseT_Half)
1241 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1242 if (bp->advertising & ADVERTISED_10baseT_Full)
1243 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1244 if (bp->advertising & ADVERTISED_100baseT_Half)
1245 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1246 if (bp->advertising & ADVERTISED_100baseT_Full)
1247 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1248 if (bp->advertising & ADVERTISED_1000baseT_Full)
1249 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1250 if (bp->advertising & ADVERTISED_2500baseX_Full)
1251 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1252 } else {
1253 if (bp->req_line_speed == SPEED_2500)
1254 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1255 else if (bp->req_line_speed == SPEED_1000)
1256 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1257 else if (bp->req_line_speed == SPEED_100) {
1258 if (bp->req_duplex == DUPLEX_FULL)
1259 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1260 else
1261 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1262 } else if (bp->req_line_speed == SPEED_10) {
1263 if (bp->req_duplex == DUPLEX_FULL)
1264 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1265 else
1266 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1267 }
1268 }
1269
1270 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1271 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1272 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1273 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1274
1275 if (port == PORT_TP)
1276 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1277 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1278
1279 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1280
1281 spin_unlock_bh(&bp->phy_lock);
1282 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1283 spin_lock_bh(&bp->phy_lock);
1284
1285 return 0;
1286}
1287
1288static int
1289bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
b6016b76 1290{
605a9e20 1291 u32 adv, bmcr;
b6016b76
MC
1292 u32 new_adv = 0;
1293
0d8a6571
MC
1294 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1295 return (bnx2_setup_remote_phy(bp, port));
1296
b6016b76
MC
1297 if (!(bp->autoneg & AUTONEG_SPEED)) {
1298 u32 new_bmcr;
5b0c76ad
MC
1299 int force_link_down = 0;
1300
605a9e20
MC
1301 if (bp->req_line_speed == SPEED_2500) {
1302 if (!bnx2_test_and_enable_2g5(bp))
1303 force_link_down = 1;
1304 } else if (bp->req_line_speed == SPEED_1000) {
1305 if (bnx2_test_and_disable_2g5(bp))
1306 force_link_down = 1;
1307 }
ca58c3af 1308 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1309 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1310
ca58c3af 1311 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1312 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1313 new_bmcr |= BMCR_SPEED1000;
605a9e20 1314
27a005b8
MC
1315 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1316 if (bp->req_line_speed == SPEED_2500)
1317 bnx2_enable_forced_2g5(bp);
1318 else if (bp->req_line_speed == SPEED_1000) {
1319 bnx2_disable_forced_2g5(bp);
1320 new_bmcr &= ~0x2000;
1321 }
1322
1323 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1324 if (bp->req_line_speed == SPEED_2500)
1325 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1326 else
1327 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1328 }
1329
b6016b76 1330 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1331 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1332 new_bmcr |= BMCR_FULLDPLX;
1333 }
1334 else {
5b0c76ad 1335 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1336 new_bmcr &= ~BMCR_FULLDPLX;
1337 }
5b0c76ad 1338 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1339 /* Force a link down visible on the other side */
1340 if (bp->link_up) {
ca58c3af 1341 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1342 ~(ADVERTISE_1000XFULL |
1343 ADVERTISE_1000XHALF));
ca58c3af 1344 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1345 BMCR_ANRESTART | BMCR_ANENABLE);
1346
1347 bp->link_up = 0;
1348 netif_carrier_off(bp->dev);
ca58c3af 1349 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1350 bnx2_report_link(bp);
b6016b76 1351 }
ca58c3af
MC
1352 bnx2_write_phy(bp, bp->mii_adv, adv);
1353 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1354 } else {
1355 bnx2_resolve_flow_ctrl(bp);
1356 bnx2_set_mac_link(bp);
b6016b76
MC
1357 }
1358 return 0;
1359 }
1360
605a9e20 1361 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1362
b6016b76
MC
1363 if (bp->advertising & ADVERTISED_1000baseT_Full)
1364 new_adv |= ADVERTISE_1000XFULL;
1365
1366 new_adv |= bnx2_phy_get_pause_adv(bp);
1367
ca58c3af
MC
1368 bnx2_read_phy(bp, bp->mii_adv, &adv);
1369 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1370
1371 bp->serdes_an_pending = 0;
1372 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1373 /* Force a link down visible on the other side */
1374 if (bp->link_up) {
ca58c3af 1375 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1376 spin_unlock_bh(&bp->phy_lock);
1377 msleep(20);
1378 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1379 }
1380
ca58c3af
MC
1381 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1382 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1383 BMCR_ANENABLE);
f8dd064e
MC
1384 /* Speed up link-up time when the link partner
1385 * does not autonegotiate which is very common
1386 * in blade servers. Some blade servers use
1387 * IPMI for kerboard input and it's important
1388 * to minimize link disruptions. Autoneg. involves
1389 * exchanging base pages plus 3 next pages and
1390 * normally completes in about 120 msec.
1391 */
1392 bp->current_interval = SERDES_AN_TIMEOUT;
1393 bp->serdes_an_pending = 1;
1394 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1395 } else {
1396 bnx2_resolve_flow_ctrl(bp);
1397 bnx2_set_mac_link(bp);
b6016b76
MC
1398 }
1399
1400 return 0;
1401}
1402
/* Advertisement masks used by the link-setup and ethtool paths.
 * ETHTOOL_ALL_FIBRE_SPEED expands against the local 'bp' in its user.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1417
0d8a6571
MC
1418static void
1419bnx2_set_default_remote_link(struct bnx2 *bp)
1420{
1421 u32 link;
1422
1423 if (bp->phy_port == PORT_TP)
1424 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1425 else
1426 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1427
1428 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1429 bp->req_line_speed = 0;
1430 bp->autoneg |= AUTONEG_SPEED;
1431 bp->advertising = ADVERTISED_Autoneg;
1432 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1433 bp->advertising |= ADVERTISED_10baseT_Half;
1434 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1435 bp->advertising |= ADVERTISED_10baseT_Full;
1436 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1437 bp->advertising |= ADVERTISED_100baseT_Half;
1438 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1439 bp->advertising |= ADVERTISED_100baseT_Full;
1440 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1441 bp->advertising |= ADVERTISED_1000baseT_Full;
1442 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1443 bp->advertising |= ADVERTISED_2500baseX_Full;
1444 } else {
1445 bp->autoneg = 0;
1446 bp->advertising = 0;
1447 bp->req_duplex = DUPLEX_FULL;
1448 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1449 bp->req_line_speed = SPEED_10;
1450 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1451 bp->req_duplex = DUPLEX_HALF;
1452 }
1453 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1454 bp->req_line_speed = SPEED_100;
1455 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1456 bp->req_duplex = DUPLEX_HALF;
1457 }
1458 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1459 bp->req_line_speed = SPEED_1000;
1460 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1461 bp->req_line_speed = SPEED_2500;
1462 }
1463}
1464
deaf391b
MC
1465static void
1466bnx2_set_default_link(struct bnx2 *bp)
1467{
0d8a6571
MC
1468 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1469 return bnx2_set_default_remote_link(bp);
1470
deaf391b
MC
1471 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1472 bp->req_line_speed = 0;
1473 if (bp->phy_flags & PHY_SERDES_FLAG) {
1474 u32 reg;
1475
1476 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1477
1478 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1479 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1480 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1481 bp->autoneg = 0;
1482 bp->req_line_speed = bp->line_speed = SPEED_1000;
1483 bp->req_duplex = DUPLEX_FULL;
1484 }
1485 } else
1486 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1487}
1488
df149d70
MC
1489static void
1490bnx2_send_heart_beat(struct bnx2 *bp)
1491{
1492 u32 msg;
1493 u32 addr;
1494
1495 spin_lock(&bp->indirect_lock);
1496 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1497 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1498 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1499 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1500 spin_unlock(&bp->indirect_lock);
1501}
1502
0d8a6571
MC
1503static void
1504bnx2_remote_phy_event(struct bnx2 *bp)
1505{
1506 u32 msg;
1507 u8 link_up = bp->link_up;
1508 u8 old_port;
1509
1510 msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
1511
df149d70
MC
1512 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1513 bnx2_send_heart_beat(bp);
1514
1515 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1516
0d8a6571
MC
1517 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1518 bp->link_up = 0;
1519 else {
1520 u32 speed;
1521
1522 bp->link_up = 1;
1523 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1524 bp->duplex = DUPLEX_FULL;
1525 switch (speed) {
1526 case BNX2_LINK_STATUS_10HALF:
1527 bp->duplex = DUPLEX_HALF;
1528 case BNX2_LINK_STATUS_10FULL:
1529 bp->line_speed = SPEED_10;
1530 break;
1531 case BNX2_LINK_STATUS_100HALF:
1532 bp->duplex = DUPLEX_HALF;
1533 case BNX2_LINK_STATUS_100BASE_T4:
1534 case BNX2_LINK_STATUS_100FULL:
1535 bp->line_speed = SPEED_100;
1536 break;
1537 case BNX2_LINK_STATUS_1000HALF:
1538 bp->duplex = DUPLEX_HALF;
1539 case BNX2_LINK_STATUS_1000FULL:
1540 bp->line_speed = SPEED_1000;
1541 break;
1542 case BNX2_LINK_STATUS_2500HALF:
1543 bp->duplex = DUPLEX_HALF;
1544 case BNX2_LINK_STATUS_2500FULL:
1545 bp->line_speed = SPEED_2500;
1546 break;
1547 default:
1548 bp->line_speed = 0;
1549 break;
1550 }
1551
1552 spin_lock(&bp->phy_lock);
1553 bp->flow_ctrl = 0;
1554 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1555 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1556 if (bp->duplex == DUPLEX_FULL)
1557 bp->flow_ctrl = bp->req_flow_ctrl;
1558 } else {
1559 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1560 bp->flow_ctrl |= FLOW_CTRL_TX;
1561 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1562 bp->flow_ctrl |= FLOW_CTRL_RX;
1563 }
1564
1565 old_port = bp->phy_port;
1566 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1567 bp->phy_port = PORT_FIBRE;
1568 else
1569 bp->phy_port = PORT_TP;
1570
1571 if (old_port != bp->phy_port)
1572 bnx2_set_default_link(bp);
1573
1574 spin_unlock(&bp->phy_lock);
1575 }
1576 if (bp->link_up != link_up)
1577 bnx2_report_link(bp);
1578
1579 bnx2_set_mac_link(bp);
1580}
1581
1582static int
1583bnx2_set_remote_link(struct bnx2 *bp)
1584{
1585 u32 evt_code;
1586
1587 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1588 switch (evt_code) {
1589 case BNX2_FW_EVT_CODE_LINK_EVENT:
1590 bnx2_remote_phy_event(bp);
1591 break;
1592 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1593 default:
df149d70 1594 bnx2_send_heart_beat(bp);
0d8a6571
MC
1595 break;
1596 }
1597 return 0;
1598}
1599
b6016b76
MC
1600static int
1601bnx2_setup_copper_phy(struct bnx2 *bp)
1602{
1603 u32 bmcr;
1604 u32 new_bmcr;
1605
ca58c3af 1606 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1607
1608 if (bp->autoneg & AUTONEG_SPEED) {
1609 u32 adv_reg, adv1000_reg;
1610 u32 new_adv_reg = 0;
1611 u32 new_adv1000_reg = 0;
1612
ca58c3af 1613 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
b6016b76
MC
1614 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1615 ADVERTISE_PAUSE_ASYM);
1616
1617 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1618 adv1000_reg &= PHY_ALL_1000_SPEED;
1619
1620 if (bp->advertising & ADVERTISED_10baseT_Half)
1621 new_adv_reg |= ADVERTISE_10HALF;
1622 if (bp->advertising & ADVERTISED_10baseT_Full)
1623 new_adv_reg |= ADVERTISE_10FULL;
1624 if (bp->advertising & ADVERTISED_100baseT_Half)
1625 new_adv_reg |= ADVERTISE_100HALF;
1626 if (bp->advertising & ADVERTISED_100baseT_Full)
1627 new_adv_reg |= ADVERTISE_100FULL;
1628 if (bp->advertising & ADVERTISED_1000baseT_Full)
1629 new_adv1000_reg |= ADVERTISE_1000FULL;
6aa20a22 1630
b6016b76
MC
1631 new_adv_reg |= ADVERTISE_CSMA;
1632
1633 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1634
1635 if ((adv1000_reg != new_adv1000_reg) ||
1636 (adv_reg != new_adv_reg) ||
1637 ((bmcr & BMCR_ANENABLE) == 0)) {
1638
ca58c3af 1639 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
b6016b76 1640 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
ca58c3af 1641 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
1642 BMCR_ANENABLE);
1643 }
1644 else if (bp->link_up) {
1645 /* Flow ctrl may have changed from auto to forced */
1646 /* or vice-versa. */
1647
1648 bnx2_resolve_flow_ctrl(bp);
1649 bnx2_set_mac_link(bp);
1650 }
1651 return 0;
1652 }
1653
1654 new_bmcr = 0;
1655 if (bp->req_line_speed == SPEED_100) {
1656 new_bmcr |= BMCR_SPEED100;
1657 }
1658 if (bp->req_duplex == DUPLEX_FULL) {
1659 new_bmcr |= BMCR_FULLDPLX;
1660 }
1661 if (new_bmcr != bmcr) {
1662 u32 bmsr;
b6016b76 1663
ca58c3af
MC
1664 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1665 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 1666
b6016b76
MC
1667 if (bmsr & BMSR_LSTATUS) {
1668 /* Force link down */
ca58c3af 1669 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
1670 spin_unlock_bh(&bp->phy_lock);
1671 msleep(50);
1672 spin_lock_bh(&bp->phy_lock);
1673
ca58c3af
MC
1674 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1675 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
1676 }
1677
ca58c3af 1678 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
1679
1680 /* Normally, the new speed is setup after the link has
1681 * gone down and up again. In some cases, link will not go
1682 * down so we need to set up the new speed here.
1683 */
1684 if (bmsr & BMSR_LSTATUS) {
1685 bp->line_speed = bp->req_line_speed;
1686 bp->duplex = bp->req_duplex;
1687 bnx2_resolve_flow_ctrl(bp);
1688 bnx2_set_mac_link(bp);
1689 }
27a005b8
MC
1690 } else {
1691 bnx2_resolve_flow_ctrl(bp);
1692 bnx2_set_mac_link(bp);
b6016b76
MC
1693 }
1694 return 0;
1695}
1696
1697static int
0d8a6571 1698bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1699{
1700 if (bp->loopback == MAC_LOOPBACK)
1701 return 0;
1702
1703 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1704 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1705 }
1706 else {
1707 return (bnx2_setup_copper_phy(bp));
1708 }
1709}
1710
27a005b8
MC
1711static int
1712bnx2_init_5709s_phy(struct bnx2 *bp)
1713{
1714 u32 val;
1715
1716 bp->mii_bmcr = MII_BMCR + 0x10;
1717 bp->mii_bmsr = MII_BMSR + 0x10;
1718 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1719 bp->mii_adv = MII_ADVERTISE + 0x10;
1720 bp->mii_lpa = MII_LPA + 0x10;
1721 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1722
1723 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1724 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1725
1726 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1727 bnx2_reset_phy(bp);
1728
1729 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1730
1731 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1732 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1733 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1734 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1735
1736 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1737 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1738 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1739 val |= BCM5708S_UP1_2G5;
1740 else
1741 val &= ~BCM5708S_UP1_2G5;
1742 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1743
1744 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1745 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1746 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1747 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1748
1749 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1750
1751 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1752 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1753 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1754
1755 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1756
1757 return 0;
1758}
1759
b6016b76 1760static int
5b0c76ad
MC
1761bnx2_init_5708s_phy(struct bnx2 *bp)
1762{
1763 u32 val;
1764
27a005b8
MC
1765 bnx2_reset_phy(bp);
1766
1767 bp->mii_up1 = BCM5708S_UP1;
1768
5b0c76ad
MC
1769 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1770 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1771 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1772
1773 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1774 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1775 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1776
1777 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1778 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1779 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1780
1781 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1782 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1783 val |= BCM5708S_UP1_2G5;
1784 bnx2_write_phy(bp, BCM5708S_UP1, val);
1785 }
1786
1787 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
1788 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1789 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
1790 /* increase tx signal amplitude */
1791 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1792 BCM5708S_BLK_ADDR_TX_MISC);
1793 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1794 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1795 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1796 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1797 }
1798
e3648b3d 1799 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
1800 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1801
1802 if (val) {
1803 u32 is_backplane;
1804
e3648b3d 1805 is_backplane = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
1806 BNX2_SHARED_HW_CFG_CONFIG);
1807 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1808 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1809 BCM5708S_BLK_ADDR_TX_MISC);
1810 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1811 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1812 BCM5708S_BLK_ADDR_DIG);
1813 }
1814 }
1815 return 0;
1816}
1817
1818static int
1819bnx2_init_5706s_phy(struct bnx2 *bp)
b6016b76 1820{
27a005b8
MC
1821 bnx2_reset_phy(bp);
1822
b6016b76
MC
1823 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1824
59b47d8a
MC
1825 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1826 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
1827
1828 if (bp->dev->mtu > 1500) {
1829 u32 val;
1830
1831 /* Set extended packet length bit */
1832 bnx2_write_phy(bp, 0x18, 0x7);
1833 bnx2_read_phy(bp, 0x18, &val);
1834 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1835
1836 bnx2_write_phy(bp, 0x1c, 0x6c00);
1837 bnx2_read_phy(bp, 0x1c, &val);
1838 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1839 }
1840 else {
1841 u32 val;
1842
1843 bnx2_write_phy(bp, 0x18, 0x7);
1844 bnx2_read_phy(bp, 0x18, &val);
1845 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1846
1847 bnx2_write_phy(bp, 0x1c, 0x6c00);
1848 bnx2_read_phy(bp, 0x1c, &val);
1849 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1850 }
1851
1852 return 0;
1853}
1854
1855static int
1856bnx2_init_copper_phy(struct bnx2 *bp)
1857{
5b0c76ad
MC
1858 u32 val;
1859
27a005b8
MC
1860 bnx2_reset_phy(bp);
1861
b6016b76
MC
1862 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1863 bnx2_write_phy(bp, 0x18, 0x0c00);
1864 bnx2_write_phy(bp, 0x17, 0x000a);
1865 bnx2_write_phy(bp, 0x15, 0x310b);
1866 bnx2_write_phy(bp, 0x17, 0x201f);
1867 bnx2_write_phy(bp, 0x15, 0x9506);
1868 bnx2_write_phy(bp, 0x17, 0x401f);
1869 bnx2_write_phy(bp, 0x15, 0x14e2);
1870 bnx2_write_phy(bp, 0x18, 0x0400);
1871 }
1872
b659f44e
MC
1873 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1874 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1875 MII_BNX2_DSP_EXPAND_REG | 0x8);
1876 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1877 val &= ~(1 << 8);
1878 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1879 }
1880
b6016b76 1881 if (bp->dev->mtu > 1500) {
b6016b76
MC
1882 /* Set extended packet length bit */
1883 bnx2_write_phy(bp, 0x18, 0x7);
1884 bnx2_read_phy(bp, 0x18, &val);
1885 bnx2_write_phy(bp, 0x18, val | 0x4000);
1886
1887 bnx2_read_phy(bp, 0x10, &val);
1888 bnx2_write_phy(bp, 0x10, val | 0x1);
1889 }
1890 else {
b6016b76
MC
1891 bnx2_write_phy(bp, 0x18, 0x7);
1892 bnx2_read_phy(bp, 0x18, &val);
1893 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1894
1895 bnx2_read_phy(bp, 0x10, &val);
1896 bnx2_write_phy(bp, 0x10, val & ~0x1);
1897 }
1898
5b0c76ad
MC
1899 /* ethernet@wirespeed */
1900 bnx2_write_phy(bp, 0x18, 0x7007);
1901 bnx2_read_phy(bp, 0x18, &val);
1902 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
1903 return 0;
1904}
1905
1906
1907static int
1908bnx2_init_phy(struct bnx2 *bp)
1909{
1910 u32 val;
1911 int rc = 0;
1912
1913 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1914 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1915
ca58c3af
MC
1916 bp->mii_bmcr = MII_BMCR;
1917 bp->mii_bmsr = MII_BMSR;
27a005b8 1918 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
1919 bp->mii_adv = MII_ADVERTISE;
1920 bp->mii_lpa = MII_LPA;
1921
b6016b76
MC
1922 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1923
0d8a6571
MC
1924 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1925 goto setup_phy;
1926
b6016b76
MC
1927 bnx2_read_phy(bp, MII_PHYSID1, &val);
1928 bp->phy_id = val << 16;
1929 bnx2_read_phy(bp, MII_PHYSID2, &val);
1930 bp->phy_id |= val & 0xffff;
1931
1932 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1933 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1934 rc = bnx2_init_5706s_phy(bp);
1935 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1936 rc = bnx2_init_5708s_phy(bp);
27a005b8
MC
1937 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1938 rc = bnx2_init_5709s_phy(bp);
b6016b76
MC
1939 }
1940 else {
1941 rc = bnx2_init_copper_phy(bp);
1942 }
1943
0d8a6571
MC
1944setup_phy:
1945 if (!rc)
1946 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
1947
1948 return rc;
1949}
1950
1951static int
1952bnx2_set_mac_loopback(struct bnx2 *bp)
1953{
1954 u32 mac_mode;
1955
1956 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1957 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1958 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1959 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1960 bp->link_up = 1;
1961 return 0;
1962}
1963
bc5a0690
MC
1964static int bnx2_test_link(struct bnx2 *);
1965
1966static int
1967bnx2_set_phy_loopback(struct bnx2 *bp)
1968{
1969 u32 mac_mode;
1970 int rc, i;
1971
1972 spin_lock_bh(&bp->phy_lock);
ca58c3af 1973 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
1974 BMCR_SPEED1000);
1975 spin_unlock_bh(&bp->phy_lock);
1976 if (rc)
1977 return rc;
1978
1979 for (i = 0; i < 10; i++) {
1980 if (bnx2_test_link(bp) == 0)
1981 break;
80be4434 1982 msleep(100);
bc5a0690
MC
1983 }
1984
1985 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1986 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1987 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 1988 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
1989
1990 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1991 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1992 bp->link_up = 1;
1993 return 0;
1994}
1995
b6016b76 1996static int
b090ae2b 1997bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
b6016b76
MC
1998{
1999 int i;
2000 u32 val;
2001
b6016b76
MC
2002 bp->fw_wr_seq++;
2003 msg_data |= bp->fw_wr_seq;
2004
e3648b3d 2005 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76
MC
2006
2007 /* wait for an acknowledgement. */
b090ae2b
MC
2008 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
2009 msleep(10);
b6016b76 2010
e3648b3d 2011 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
b6016b76
MC
2012
2013 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2014 break;
2015 }
b090ae2b
MC
2016 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2017 return 0;
b6016b76
MC
2018
2019 /* If we timed out, inform the firmware that this is the case. */
b090ae2b
MC
2020 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2021 if (!silent)
2022 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2023 "%x\n", msg_data);
b6016b76
MC
2024
2025 msg_data &= ~BNX2_DRV_MSG_CODE;
2026 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2027
e3648b3d 2028 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76 2029
b6016b76
MC
2030 return -EBUSY;
2031 }
2032
b090ae2b
MC
2033 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2034 return -EIO;
2035
b6016b76
MC
2036 return 0;
2037}
2038
59b47d8a
MC
2039static int
2040bnx2_init_5709_context(struct bnx2 *bp)
2041{
2042 int i, ret = 0;
2043 u32 val;
2044
2045 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2046 val |= (BCM_PAGE_BITS - 8) << 16;
2047 REG_WR(bp, BNX2_CTX_COMMAND, val);
641bdcd5
MC
2048 for (i = 0; i < 10; i++) {
2049 val = REG_RD(bp, BNX2_CTX_COMMAND);
2050 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2051 break;
2052 udelay(2);
2053 }
2054 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2055 return -EBUSY;
2056
59b47d8a
MC
2057 for (i = 0; i < bp->ctx_pages; i++) {
2058 int j;
2059
2060 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2061 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2062 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2063 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2064 (u64) bp->ctx_blk_mapping[i] >> 32);
2065 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2066 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2067 for (j = 0; j < 10; j++) {
2068
2069 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2070 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2071 break;
2072 udelay(5);
2073 }
2074 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2075 ret = -EBUSY;
2076 break;
2077 }
2078 }
2079 return ret;
2080}
2081
b6016b76
MC
2082static void
2083bnx2_init_context(struct bnx2 *bp)
2084{
2085 u32 vcid;
2086
2087 vcid = 96;
2088 while (vcid) {
2089 u32 vcid_addr, pcid_addr, offset;
7947b20e 2090 int i;
b6016b76
MC
2091
2092 vcid--;
2093
2094 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2095 u32 new_vcid;
2096
2097 vcid_addr = GET_PCID_ADDR(vcid);
2098 if (vcid & 0x8) {
2099 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2100 }
2101 else {
2102 new_vcid = vcid;
2103 }
2104 pcid_addr = GET_PCID_ADDR(new_vcid);
2105 }
2106 else {
2107 vcid_addr = GET_CID_ADDR(vcid);
2108 pcid_addr = vcid_addr;
2109 }
2110
7947b20e
MC
2111 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2112 vcid_addr += (i << PHY_CTX_SHIFT);
2113 pcid_addr += (i << PHY_CTX_SHIFT);
b6016b76 2114
7947b20e
MC
2115 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
2116 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
b6016b76 2117
7947b20e
MC
2118 /* Zero out the context. */
2119 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2120 CTX_WR(bp, 0x00, offset, 0);
2121
2122 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2123 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2124 }
b6016b76
MC
2125 }
2126}
2127
/* Work around bad on-chip RX mbuf clusters: drain the free-mbuf pool,
 * remember only the good clusters (bit 9 clear in the returned value),
 * and free just those back, permanently retiring the bad ones.
 * Returns 0 on success or -ENOMEM if the scratch array cannot be
 * allocated.  NOTE(review): 512 entries presumably covers the whole
 * pool — confirm against the RBUF free count for this chip.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	/* The RX mbuf block must be enabled before the pool can be used. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		/* Re-encode the cluster value into the format expected by
		 * the free register (value in both halves, low bit set).
		 */
		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2178
2179static void
6aa20a22 2180bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2181{
2182 u32 val;
2183 u8 *mac_addr = bp->dev->dev_addr;
2184
2185 val = (mac_addr[0] << 8) | mac_addr[1];
2186
2187 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2188
6aa20a22 2189 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2190 (mac_addr[4] << 8) | mac_addr[5];
2191
2192 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2193}
2194
/* Allocate and DMA-map a fresh receive skb for ring slot @index, then
 * publish the 64-bit bus address in the matching rx_bd and advance the
 * producer byte-sequence counter.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Pad the head so skb->data lands on a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	/* Record skb and mapping so the completion path can unmap/free. */
	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the (possibly 64-bit) DMA address across the BD words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2225
da3e4fbe
MC
2226static int
2227bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
b6016b76 2228{
da3e4fbe 2229 struct status_block *sblk = bp->status_blk;
b6016b76 2230 u32 new_link_state, old_link_state;
da3e4fbe 2231 int is_set = 1;
b6016b76 2232
da3e4fbe
MC
2233 new_link_state = sblk->status_attn_bits & event;
2234 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2235 if (new_link_state != old_link_state) {
da3e4fbe
MC
2236 if (new_link_state)
2237 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2238 else
2239 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2240 } else
2241 is_set = 0;
2242
2243 return is_set;
2244}
2245
/* Service PHY attention events reported in the status block. */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	/* Link-state change: re-evaluate the link under the PHY lock. */
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	/* Timer-abort attention is handled by bnx2_set_remote_link();
	 * presumably signals a remotely-managed link change — see callee.
	 */
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2258
/* Reclaim completed TX descriptors up to the hardware consumer index:
 * unmap each packet's head and fragments, free the skb, and wake the
 * queue if it was stopped and enough descriptors are now available.
 * Runs from NAPI poll context.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* Skip the last entry of a ring page (never a real descriptor). */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap-around;
			 * stop if the packet's last BD is not yet completed.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each paged fragment's descriptor. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index: more packets may have
		 * completed while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wake-up under netif_tx_lock to avoid racing
	 * with the transmit path stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2346
/* Recycle the skb at consumer slot @cons back into producer slot @prod
 * without allocating: re-sync the buffer for device use and, when the
 * slots differ, move the DMA mapping and BD address words across.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Hand the (partially CPU-synced) buffer back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: skb pointer restored, nothing else to move. */
	if (cons == prod)
		return;

	/* Carry the DMA mapping over to the producer slot. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2376
/* NAPI receive processing: walk completed RX descriptors up to the
 * hardware consumer index, validate each frame, deliver it up the
 * stack, and refill/recycle ring buffers.  Processes at most @budget
 * packets; returns the number actually handled.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the last entry of a ring page (not a real descriptor). */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the header area for the CPU; the full buffer
		 * is unmapped later only if we keep this skb.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr describing the frame. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* strip CRC */

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer stays on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated: detach this skb fully. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Allocation failed or frame bad: recycle buffer. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when no xsum errors
		 * were flagged for the TCP/UDP frame.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte count. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2524
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache line NAPI poll will read first. */
	prefetch(bp->status_blk);
	/* Ack the interrupt and mask further ones until poll re-enables. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2547
8e6a72c4
MC
/* One-shot MSI ISR.  Unlike bnx2_msi(), no ack/mask register write is
 * done here — in one-shot mode no further MSI arrives until re-armed
 * (presumably by the poll path; confirm against the HC setup code).
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* Warm the cache line NAPI poll will read first. */
	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2564
b6016b76 2565static irqreturn_t
7d12e780 2566bnx2_interrupt(int irq, void *dev_instance)
b6016b76
MC
2567{
2568 struct net_device *dev = dev_instance;
972ec0d4 2569 struct bnx2 *bp = netdev_priv(dev);
b8a7ce7b 2570 struct status_block *sblk = bp->status_blk;
b6016b76
MC
2571
2572 /* When using INTx, it is possible for the interrupt to arrive
2573 * at the CPU before the status block posted prior to the
2574 * interrupt. Reading a register will flush the status block.
2575 * When using MSI, the MSI message will always complete after
2576 * the status block write.
2577 */
b8a7ce7b 2578 if ((sblk->status_idx == bp->last_status_idx) &&
b6016b76
MC
2579 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2580 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 2581 return IRQ_NONE;
b6016b76
MC
2582
2583 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2584 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2585 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2586
b8a7ce7b
MC
2587 /* Read back to deassert IRQ immediately to avoid too many
2588 * spurious interrupts.
2589 */
2590 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
2591
b6016b76 2592 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
2593 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2594 return IRQ_HANDLED;
b6016b76 2595
b8a7ce7b
MC
2596 if (netif_rx_schedule_prep(dev)) {
2597 bp->last_status_idx = sblk->status_idx;
2598 __netif_rx_schedule(dev);
2599 }
b6016b76 2600
73eef4cd 2601 return IRQ_HANDLED;
b6016b76
MC
2602}
2603
0d8a6571
MC
/* Attention bits this driver services (link + timer-abort events). */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return nonzero when the status block holds unprocessed work:
 * new rx/tx completions or an unacknowledged attention event.
 */
static inline int
bnx2_has_work(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;

	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
		return 1;

	/* An attention event is pending when a bit differs from its ack. */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
2622
b6016b76
MC
/* NAPI poll handler (old *budget/quota API): service attention events,
 * reclaim tx completions, process rx packets within quota, and re-arm
 * interrupts when all work is done.  Returns 0 when complete, 1 to be
 * polled again.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Respect the per-device quota as well as the budget. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single write re-arms the interrupt. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: two back-to-back writes — first with MASK_INT set,
		 * then without to unmask.  NOTE(review): the doubled write
		 * looks intentional (hardware re-arm sequence) — confirm
		 * against the chip programming guide before "simplifying".
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2683
932ff279 2684/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
2685 * from set_multicast.
2686 */
2687static void
2688bnx2_set_rx_mode(struct net_device *dev)
2689{
972ec0d4 2690 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
2691 u32 rx_mode, sort_mode;
2692 int i;
b6016b76 2693
c770a65c 2694 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
2695
2696 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2697 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2698 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2699#ifdef BCM_VLAN
e29054f9 2700 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
b6016b76 2701 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76 2702#else
e29054f9
MC
2703 if (!(bp->flags & ASF_ENABLE_FLAG))
2704 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
2705#endif
2706 if (dev->flags & IFF_PROMISC) {
2707 /* Promiscuous mode. */
2708 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
2709 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2710 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
2711 }
2712 else if (dev->flags & IFF_ALLMULTI) {
2713 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2714 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2715 0xffffffff);
2716 }
2717 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2718 }
2719 else {
2720 /* Accept one or more multicast(s). */
2721 struct dev_mc_list *mclist;
2722 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2723 u32 regidx;
2724 u32 bit;
2725 u32 crc;
2726
2727 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2728
2729 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2730 i++, mclist = mclist->next) {
2731
2732 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2733 bit = crc & 0xff;
2734 regidx = (bit & 0xe0) >> 5;
2735 bit &= 0x1f;
2736 mc_filter[regidx] |= (1 << bit);
2737 }
2738
2739 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2740 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2741 mc_filter[i]);
2742 }
2743
2744 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2745 }
2746
2747 if (rx_mode != bp->rx_mode) {
2748 bp->rx_mode = rx_mode;
2749 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2750 }
2751
2752 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2753 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2754 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2755
c770a65c 2756 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
2757}
2758
fba9fe91
MC
2759#define FW_BUF_SIZE 0x8000
2760
2761static int
2762bnx2_gunzip_init(struct bnx2 *bp)
2763{
2764 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2765 goto gunzip_nomem1;
2766
2767 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2768 goto gunzip_nomem2;
2769
2770 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2771 if (bp->strm->workspace == NULL)
2772 goto gunzip_nomem3;
2773
2774 return 0;
2775
2776gunzip_nomem3:
2777 kfree(bp->strm);
2778 bp->strm = NULL;
2779
2780gunzip_nomem2:
2781 vfree(bp->gunzip_buf);
2782 bp->gunzip_buf = NULL;
2783
2784gunzip_nomem1:
2785 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2786 "uncompression.\n", bp->dev->name);
2787 return -ENOMEM;
2788}
2789
2790static void
2791bnx2_gunzip_end(struct bnx2 *bp)
2792{
2793 kfree(bp->strm->workspace);
2794
2795 kfree(bp->strm);
2796 bp->strm = NULL;
2797
2798 if (bp->gunzip_buf) {
2799 vfree(bp->gunzip_buf);
2800 bp->gunzip_buf = NULL;
2801 }
2802}
2803
2804static int
2805bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2806{
2807 int n, rc;
2808
2809 /* check gzip header */
2810 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2811 return -EINVAL;
2812
2813 n = 10;
2814
2815#define FNAME 0x8
2816 if (zbuf[3] & FNAME)
2817 while ((zbuf[n++] != 0) && (n < len));
2818
2819 bp->strm->next_in = zbuf + n;
2820 bp->strm->avail_in = len - n;
2821 bp->strm->next_out = bp->gunzip_buf;
2822 bp->strm->avail_out = FW_BUF_SIZE;
2823
2824 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2825 if (rc != Z_OK)
2826 return rc;
2827
2828 rc = zlib_inflate(bp->strm, Z_FINISH);
2829
2830 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2831 *outbuf = bp->gunzip_buf;
2832
2833 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2834 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2835 bp->dev->name, bp->strm->msg);
2836
2837 zlib_inflateEnd(bp->strm);
2838
2839 if (rc == Z_STREAM_END)
2840 return 0;
2841
2842 return rc;
2843}
2844
b6016b76
MC
/* Download an RV2P firmware image into processor @rv2p_proc (PROC1 or
 * PROC2), one 64-bit instruction (two 32-bit words) at a time, then
 * hold the processor in reset; the un-stall happens later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		/* Commit the instruction at word address i/8. */
		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2877
af3ee519 2878static int
b6016b76
MC
2879load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2880{
2881 u32 offset;
2882 u32 val;
af3ee519 2883 int rc;
b6016b76
MC
2884
2885 /* Halt the CPU. */
2886 val = REG_RD_IND(bp, cpu_reg->mode);
2887 val |= cpu_reg->mode_value_halt;
2888 REG_WR_IND(bp, cpu_reg->mode, val);
2889 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2890
2891 /* Load the Text area. */
2892 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519
MC
2893 if (fw->gz_text) {
2894 u32 text_len;
2895 void *text;
2896
2897 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2898 &text_len);
2899 if (rc)
2900 return rc;
2901
2902 fw->text = text;
2903 }
2904 if (fw->gz_text) {
b6016b76
MC
2905 int j;
2906
2907 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
fba9fe91 2908 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
b6016b76
MC
2909 }
2910 }
2911
2912 /* Load the Data area. */
2913 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2914 if (fw->data) {
2915 int j;
2916
2917 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2918 REG_WR_IND(bp, offset, fw->data[j]);
2919 }
2920 }
2921
2922 /* Load the SBSS area. */
2923 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2924 if (fw->sbss) {
2925 int j;
2926
2927 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2928 REG_WR_IND(bp, offset, fw->sbss[j]);
2929 }
2930 }
2931
2932 /* Load the BSS area. */
2933 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2934 if (fw->bss) {
2935 int j;
2936
2937 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2938 REG_WR_IND(bp, offset, fw->bss[j]);
2939 }
2940 }
2941
2942 /* Load the Read-Only area. */
2943 offset = cpu_reg->spad_base +
2944 (fw->rodata_addr - cpu_reg->mips_view_base);
2945 if (fw->rodata) {
2946 int j;
2947
2948 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2949 REG_WR_IND(bp, offset, fw->rodata[j]);
2950 }
2951 }
2952
2953 /* Clear the pre-fetch instruction. */
2954 REG_WR_IND(bp, cpu_reg->inst, 0);
2955 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2956
2957 /* Start the CPU. */
2958 val = REG_RD_IND(bp, cpu_reg->mode);
2959 val &= ~cpu_reg->mode_value_halt;
2960 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2961 REG_WR_IND(bp, cpu_reg->mode, val);
af3ee519
MC
2962
2963 return 0;
b6016b76
MC
2964}
2965
/* Load firmware into every on-chip processor: the two RV2P engines,
 * then (via load_cpu_fw) the RX, TX, TX-patchup, completion and —
 * on 5709 only — command processors.  The 06/09 firmware variant is
 * chosen per chip.  Returns 0 or the first decompression/load error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3110
/* Move the device between PCI power states.  D0 restores normal EMAC
 * operation; D3hot optionally arms Wake-on-LAN (forcing 10/100
 * autoneg, enabling magic/ACPI packet reception) before dropping to
 * low power.  Returns 0 or -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear power state bits and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for WOL link,
			 * then restore the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the management firmware we are suspending. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 0x3 selects the D3hot state in PM_CTRL; 5706 A0/A1
		 * only enter it when WOL is armed — presumably a chip
		 * erratum, confirm against the errata sheet.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3237
3238static int
3239bnx2_acquire_nvram_lock(struct bnx2 *bp)
3240{
3241 u32 val;
3242 int j;
3243
3244 /* Request access to the flash interface. */
3245 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3246 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3247 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3248 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3249 break;
3250
3251 udelay(5);
3252 }
3253
3254 if (j >= NVRAM_TIMEOUT_COUNT)
3255 return -EBUSY;
3256
3257 return 0;
3258}
3259
3260static int
3261bnx2_release_nvram_lock(struct bnx2 *bp)
3262{
3263 int j;
3264 u32 val;
3265
3266 /* Relinquish nvram interface. */
3267 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3268
3269 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3270 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3271 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3272 break;
3273
3274 udelay(5);
3275 }
3276
3277 if (j >= NVRAM_TIMEOUT_COUNT)
3278 return -EBUSY;
3279
3280 return 0;
3281}
3282
3283
3284static int
3285bnx2_enable_nvram_write(struct bnx2 *bp)
3286{
3287 u32 val;
3288
3289 val = REG_RD(bp, BNX2_MISC_CFG);
3290 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3291
3292 if (!bp->flash_info->buffered) {
3293 int j;
3294
3295 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3296 REG_WR(bp, BNX2_NVM_COMMAND,
3297 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3298
3299 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3300 udelay(5);
3301
3302 val = REG_RD(bp, BNX2_NVM_COMMAND);
3303 if (val & BNX2_NVM_COMMAND_DONE)
3304 break;
3305 }
3306
3307 if (j >= NVRAM_TIMEOUT_COUNT)
3308 return -EBUSY;
3309 }
3310 return 0;
3311}
3312
3313static void
3314bnx2_disable_nvram_write(struct bnx2 *bp)
3315{
3316 u32 val;
3317
3318 val = REG_RD(bp, BNX2_MISC_CFG);
3319 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3320}
3321
3322
3323static void
3324bnx2_enable_nvram_access(struct bnx2 *bp)
3325{
3326 u32 val;
3327
3328 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3329 /* Enable both bits, even on read. */
6aa20a22 3330 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3331 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3332}
3333
3334static void
3335bnx2_disable_nvram_access(struct bnx2 *bp)
3336{
3337 u32 val;
3338
3339 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3340 /* Disable both bits, even after read. */
6aa20a22 3341 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3342 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3343 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3344}
3345
3346static int
3347bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3348{
3349 u32 cmd;
3350 int j;
3351
3352 if (bp->flash_info->buffered)
3353 /* Buffered flash, no erase needed */
3354 return 0;
3355
3356 /* Build an erase command */
3357 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3358 BNX2_NVM_COMMAND_DOIT;
3359
3360 /* Need to clear DONE bit separately. */
3361 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3362
3363 /* Address of the NVRAM to read from. */
3364 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3365
3366 /* Issue an erase command. */
3367 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3368
3369 /* Wait for completion. */
3370 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3371 u32 val;
3372
3373 udelay(5);
3374
3375 val = REG_RD(bp, BNX2_NVM_COMMAND);
3376 if (val & BNX2_NVM_COMMAND_DONE)
3377 break;
3378 }
3379
3380 if (j >= NVRAM_TIMEOUT_COUNT)
3381 return -EBUSY;
3382
3383 return 0;
3384}
3385
3386static int
3387bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3388{
3389 u32 cmd;
3390 int j;
3391
3392 /* Build the command word. */
3393 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3394
3395 /* Calculate an offset of a buffered flash. */
3396 if (bp->flash_info->buffered) {
3397 offset = ((offset / bp->flash_info->page_size) <<
3398 bp->flash_info->page_bits) +
3399 (offset % bp->flash_info->page_size);
3400 }
3401
3402 /* Need to clear DONE bit separately. */
3403 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3404
3405 /* Address of the NVRAM to read from. */
3406 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3407
3408 /* Issue a read command. */
3409 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3410
3411 /* Wait for completion. */
3412 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3413 u32 val;
3414
3415 udelay(5);
3416
3417 val = REG_RD(bp, BNX2_NVM_COMMAND);
3418 if (val & BNX2_NVM_COMMAND_DONE) {
3419 val = REG_RD(bp, BNX2_NVM_READ);
3420
3421 val = be32_to_cpu(val);
3422 memcpy(ret_val, &val, 4);
3423 break;
3424 }
3425 }
3426 if (j >= NVRAM_TIMEOUT_COUNT)
3427 return -EBUSY;
3428
3429 return 0;
3430}
3431
3432
3433static int
3434bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3435{
3436 u32 cmd, val32;
3437 int j;
3438
3439 /* Build the command word. */
3440 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3441
3442 /* Calculate an offset of a buffered flash. */
3443 if (bp->flash_info->buffered) {
3444 offset = ((offset / bp->flash_info->page_size) <<
3445 bp->flash_info->page_bits) +
3446 (offset % bp->flash_info->page_size);
3447 }
3448
3449 /* Need to clear DONE bit separately. */
3450 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3451
3452 memcpy(&val32, val, 4);
3453 val32 = cpu_to_be32(val32);
3454
3455 /* Write the data. */
3456 REG_WR(bp, BNX2_NVM_WRITE, val32);
3457
3458 /* Address of the NVRAM to write to. */
3459 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3460
3461 /* Issue the write command. */
3462 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3463
3464 /* Wait for completion. */
3465 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3466 udelay(5);
3467
3468 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3469 break;
3470 }
3471 if (j >= NVRAM_TIMEOUT_COUNT)
3472 return -EBUSY;
3473
3474 return 0;
3475}
3476
3477static int
3478bnx2_init_nvram(struct bnx2 *bp)
3479{
3480 u32 val;
3481 int j, entry_count, rc;
3482 struct flash_spec *flash;
3483
3484 /* Determine the selected interface. */
3485 val = REG_RD(bp, BNX2_NVM_CFG1);
3486
3487 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3488
3489 rc = 0;
3490 if (val & 0x40000000) {
3491
3492 /* Flash interface has been reconfigured */
3493 for (j = 0, flash = &flash_table[0]; j < entry_count;
37137709
MC
3494 j++, flash++) {
3495 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3496 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
b6016b76
MC
3497 bp->flash_info = flash;
3498 break;
3499 }
3500 }
3501 }
3502 else {
37137709 3503 u32 mask;
b6016b76
MC
3504 /* Not yet been reconfigured */
3505
37137709
MC
3506 if (val & (1 << 23))
3507 mask = FLASH_BACKUP_STRAP_MASK;
3508 else
3509 mask = FLASH_STRAP_MASK;
3510
b6016b76
MC
3511 for (j = 0, flash = &flash_table[0]; j < entry_count;
3512 j++, flash++) {
3513
37137709 3514 if ((val & mask) == (flash->strapping & mask)) {
b6016b76
MC
3515 bp->flash_info = flash;
3516
3517 /* Request access to the flash interface. */
3518 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3519 return rc;
3520
3521 /* Enable access to flash interface */
3522 bnx2_enable_nvram_access(bp);
3523
3524 /* Reconfigure the flash interface */
3525 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3526 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3527 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3528 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3529
3530 /* Disable access to flash interface */
3531 bnx2_disable_nvram_access(bp);
3532 bnx2_release_nvram_lock(bp);
3533
3534 break;
3535 }
3536 }
3537 } /* if (val & 0x40000000) */
3538
3539 if (j == entry_count) {
3540 bp->flash_info = NULL;
2f23c523 3541 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
1122db71 3542 return -ENODEV;
b6016b76
MC
3543 }
3544
1122db71
MC
3545 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3546 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3547 if (val)
3548 bp->flash_size = val;
3549 else
3550 bp->flash_size = bp->flash_info->total_size;
3551
b6016b76
MC
3552 return rc;
3553}
3554
3555static int
3556bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3557 int buf_size)
3558{
3559 int rc = 0;
3560 u32 cmd_flags, offset32, len32, extra;
3561
3562 if (buf_size == 0)
3563 return 0;
3564
3565 /* Request access to the flash interface. */
3566 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3567 return rc;
3568
3569 /* Enable access to flash interface */
3570 bnx2_enable_nvram_access(bp);
3571
3572 len32 = buf_size;
3573 offset32 = offset;
3574 extra = 0;
3575
3576 cmd_flags = 0;
3577
3578 if (offset32 & 3) {
3579 u8 buf[4];
3580 u32 pre_len;
3581
3582 offset32 &= ~3;
3583 pre_len = 4 - (offset & 3);
3584
3585 if (pre_len >= len32) {
3586 pre_len = len32;
3587 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3588 BNX2_NVM_COMMAND_LAST;
3589 }
3590 else {
3591 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3592 }
3593
3594 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3595
3596 if (rc)
3597 return rc;
3598
3599 memcpy(ret_buf, buf + (offset & 3), pre_len);
3600
3601 offset32 += 4;
3602 ret_buf += pre_len;
3603 len32 -= pre_len;
3604 }
3605 if (len32 & 3) {
3606 extra = 4 - (len32 & 3);
3607 len32 = (len32 + 4) & ~3;
3608 }
3609
3610 if (len32 == 4) {
3611 u8 buf[4];
3612
3613 if (cmd_flags)
3614 cmd_flags = BNX2_NVM_COMMAND_LAST;
3615 else
3616 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3617 BNX2_NVM_COMMAND_LAST;
3618
3619 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3620
3621 memcpy(ret_buf, buf, 4 - extra);
3622 }
3623 else if (len32 > 0) {
3624 u8 buf[4];
3625
3626 /* Read the first word. */
3627 if (cmd_flags)
3628 cmd_flags = 0;
3629 else
3630 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3631
3632 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3633
3634 /* Advance to the next dword. */
3635 offset32 += 4;
3636 ret_buf += 4;
3637 len32 -= 4;
3638
3639 while (len32 > 4 && rc == 0) {
3640 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3641
3642 /* Advance to the next dword. */
3643 offset32 += 4;
3644 ret_buf += 4;
3645 len32 -= 4;
3646 }
3647
3648 if (rc)
3649 return rc;
3650
3651 cmd_flags = BNX2_NVM_COMMAND_LAST;
3652 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3653
3654 memcpy(ret_buf, buf, 4 - extra);
3655 }
3656
3657 /* Disable access to flash interface */
3658 bnx2_disable_nvram_access(bp);
3659
3660 bnx2_release_nvram_lock(bp);
3661
3662 return rc;
3663}
3664
3665static int
3666bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3667 int buf_size)
3668{
3669 u32 written, offset32, len32;
e6be763f 3670 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
b6016b76
MC
3671 int rc = 0;
3672 int align_start, align_end;
3673
3674 buf = data_buf;
3675 offset32 = offset;
3676 len32 = buf_size;
3677 align_start = align_end = 0;
3678
3679 if ((align_start = (offset32 & 3))) {
3680 offset32 &= ~3;
c873879c
MC
3681 len32 += align_start;
3682 if (len32 < 4)
3683 len32 = 4;
b6016b76
MC
3684 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3685 return rc;
3686 }
3687
3688 if (len32 & 3) {
c873879c
MC
3689 align_end = 4 - (len32 & 3);
3690 len32 += align_end;
3691 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3692 return rc;
b6016b76
MC
3693 }
3694
3695 if (align_start || align_end) {
e6be763f
MC
3696 align_buf = kmalloc(len32, GFP_KERNEL);
3697 if (align_buf == NULL)
b6016b76
MC
3698 return -ENOMEM;
3699 if (align_start) {
e6be763f 3700 memcpy(align_buf, start, 4);
b6016b76
MC
3701 }
3702 if (align_end) {
e6be763f 3703 memcpy(align_buf + len32 - 4, end, 4);
b6016b76 3704 }
e6be763f
MC
3705 memcpy(align_buf + align_start, data_buf, buf_size);
3706 buf = align_buf;
b6016b76
MC
3707 }
3708
ae181bc4
MC
3709 if (bp->flash_info->buffered == 0) {
3710 flash_buffer = kmalloc(264, GFP_KERNEL);
3711 if (flash_buffer == NULL) {
3712 rc = -ENOMEM;
3713 goto nvram_write_end;
3714 }
3715 }
3716
b6016b76
MC
3717 written = 0;
3718 while ((written < len32) && (rc == 0)) {
3719 u32 page_start, page_end, data_start, data_end;
3720 u32 addr, cmd_flags;
3721 int i;
b6016b76
MC
3722
3723 /* Find the page_start addr */
3724 page_start = offset32 + written;
3725 page_start -= (page_start % bp->flash_info->page_size);
3726 /* Find the page_end addr */
3727 page_end = page_start + bp->flash_info->page_size;
3728 /* Find the data_start addr */
3729 data_start = (written == 0) ? offset32 : page_start;
3730 /* Find the data_end addr */
6aa20a22 3731 data_end = (page_end > offset32 + len32) ?
b6016b76
MC
3732 (offset32 + len32) : page_end;
3733
3734 /* Request access to the flash interface. */
3735 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3736 goto nvram_write_end;
3737
3738 /* Enable access to flash interface */
3739 bnx2_enable_nvram_access(bp);
3740
3741 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3742 if (bp->flash_info->buffered == 0) {
3743 int j;
3744
3745 /* Read the whole page into the buffer
3746 * (non-buffer flash only) */
3747 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3748 if (j == (bp->flash_info->page_size - 4)) {
3749 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3750 }
3751 rc = bnx2_nvram_read_dword(bp,
6aa20a22
JG
3752 page_start + j,
3753 &flash_buffer[j],
b6016b76
MC
3754 cmd_flags);
3755
3756 if (rc)
3757 goto nvram_write_end;
3758
3759 cmd_flags = 0;
3760 }
3761 }
3762
3763 /* Enable writes to flash interface (unlock write-protect) */
3764 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3765 goto nvram_write_end;
3766
b6016b76
MC
3767 /* Loop to write back the buffer data from page_start to
3768 * data_start */
3769 i = 0;
3770 if (bp->flash_info->buffered == 0) {
c873879c
MC
3771 /* Erase the page */
3772 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3773 goto nvram_write_end;
3774
3775 /* Re-enable the write again for the actual write */
3776 bnx2_enable_nvram_write(bp);
3777
b6016b76
MC
3778 for (addr = page_start; addr < data_start;
3779 addr += 4, i += 4) {
6aa20a22 3780
b6016b76
MC
3781 rc = bnx2_nvram_write_dword(bp, addr,
3782 &flash_buffer[i], cmd_flags);
3783
3784 if (rc != 0)
3785 goto nvram_write_end;
3786
3787 cmd_flags = 0;
3788 }
3789 }
3790
3791 /* Loop to write the new data from data_start to data_end */
bae25761 3792 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
b6016b76
MC
3793 if ((addr == page_end - 4) ||
3794 ((bp->flash_info->buffered) &&
3795 (addr == data_end - 4))) {
3796
3797 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3798 }
3799 rc = bnx2_nvram_write_dword(bp, addr, buf,
3800 cmd_flags);
3801
3802 if (rc != 0)
3803 goto nvram_write_end;
3804
3805 cmd_flags = 0;
3806 buf += 4;
3807 }
3808
3809 /* Loop to write back the buffer data from data_end
3810 * to page_end */
3811 if (bp->flash_info->buffered == 0) {
3812 for (addr = data_end; addr < page_end;
3813 addr += 4, i += 4) {
6aa20a22 3814
b6016b76
MC
3815 if (addr == page_end-4) {
3816 cmd_flags = BNX2_NVM_COMMAND_LAST;
3817 }
3818 rc = bnx2_nvram_write_dword(bp, addr,
3819 &flash_buffer[i], cmd_flags);
3820
3821 if (rc != 0)
3822 goto nvram_write_end;
3823
3824 cmd_flags = 0;
3825 }
3826 }
3827
3828 /* Disable writes to flash interface (lock write-protect) */
3829 bnx2_disable_nvram_write(bp);
3830
3831 /* Disable access to flash interface */
3832 bnx2_disable_nvram_access(bp);
3833 bnx2_release_nvram_lock(bp);
3834
3835 /* Increment written */
3836 written += data_end - data_start;
3837 }
3838
3839nvram_write_end:
e6be763f
MC
3840 kfree(flash_buffer);
3841 kfree(align_buf);
b6016b76
MC
3842 return rc;
3843}
3844
0d8a6571
MC
3845static void
3846bnx2_init_remote_phy(struct bnx2 *bp)
3847{
3848 u32 val;
3849
3850 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
3851 if (!(bp->phy_flags & PHY_SERDES_FLAG))
3852 return;
3853
3854 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
3855 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
3856 return;
3857
3858 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
3859 if (netif_running(bp->dev)) {
3860 val = BNX2_DRV_ACK_CAP_SIGNATURE |
3861 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
3862 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
3863 val);
3864 }
3865 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
3866
3867 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
3868 if (val & BNX2_LINK_STATUS_SERDES_LINK)
3869 bp->phy_port = PORT_FIBRE;
3870 else
3871 bp->phy_port = PORT_TP;
3872 }
3873}
3874
b6016b76
MC
3875static int
3876bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3877{
3878 u32 val;
3879 int i, rc = 0;
3880
3881 /* Wait for the current PCI transaction to complete before
3882 * issuing a reset. */
3883 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3884 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3885 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3886 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3887 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3888 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3889 udelay(5);
3890
b090ae2b
MC
3891 /* Wait for the firmware to tell us it is ok to issue a reset. */
3892 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3893
b6016b76
MC
3894 /* Deposit a driver reset signature so the firmware knows that
3895 * this is a soft reset. */
e3648b3d 3896 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
b6016b76
MC
3897 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3898
b6016b76
MC
3899 /* Do a dummy read to force the chip to complete all current transaction
3900 * before we issue a reset. */
3901 val = REG_RD(bp, BNX2_MISC_ID);
3902
234754d5
MC
3903 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3904 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3905 REG_RD(bp, BNX2_MISC_COMMAND);
3906 udelay(5);
b6016b76 3907
234754d5
MC
3908 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3909 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
b6016b76 3910
234754d5 3911 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
b6016b76 3912
234754d5
MC
3913 } else {
3914 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3915 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3916 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3917
3918 /* Chip reset. */
3919 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3920
3921 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3922 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3923 current->state = TASK_UNINTERRUPTIBLE;
3924 schedule_timeout(HZ / 50);
b6016b76 3925 }
b6016b76 3926
234754d5
MC
3927 /* Reset takes approximate 30 usec */
3928 for (i = 0; i < 10; i++) {
3929 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3930 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3931 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3932 break;
3933 udelay(10);
3934 }
3935
3936 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3937 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3938 printk(KERN_ERR PFX "Chip reset did not complete\n");
3939 return -EBUSY;
3940 }
b6016b76
MC
3941 }
3942
3943 /* Make sure byte swapping is properly configured. */
3944 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3945 if (val != 0x01020304) {
3946 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3947 return -ENODEV;
3948 }
3949
b6016b76 3950 /* Wait for the firmware to finish its initialization. */
b090ae2b
MC
3951 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3952 if (rc)
3953 return rc;
b6016b76 3954
0d8a6571
MC
3955 spin_lock_bh(&bp->phy_lock);
3956 bnx2_init_remote_phy(bp);
3957 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3958 bnx2_set_default_remote_link(bp);
3959 spin_unlock_bh(&bp->phy_lock);
3960
b6016b76
MC
3961 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3962 /* Adjust the voltage regular to two steps lower. The default
3963 * of this register is 0x0000000e. */
3964 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3965
3966 /* Remove bad rbuf memory from the free pool. */
3967 rc = bnx2_alloc_bad_rbuf(bp);
3968 }
3969
3970 return rc;
3971}
3972
3973static int
3974bnx2_init_chip(struct bnx2 *bp)
3975{
3976 u32 val;
b090ae2b 3977 int rc;
b6016b76
MC
3978
3979 /* Make sure the interrupt is not active. */
3980 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3981
3982 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3983 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3984#ifdef __BIG_ENDIAN
6aa20a22 3985 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
b6016b76 3986#endif
6aa20a22 3987 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
b6016b76
MC
3988 DMA_READ_CHANS << 12 |
3989 DMA_WRITE_CHANS << 16;
3990
3991 val |= (0x2 << 20) | (1 << 11);
3992
dda1e390 3993 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
b6016b76
MC
3994 val |= (1 << 23);
3995
3996 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3997 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3998 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3999
4000 REG_WR(bp, BNX2_DMA_CONFIG, val);
4001
4002 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4003 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4004 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4005 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4006 }
4007
4008 if (bp->flags & PCIX_FLAG) {
4009 u16 val16;
4010
4011 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4012 &val16);
4013 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4014 val16 & ~PCI_X_CMD_ERO);
4015 }
4016
4017 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4018 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4019 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4020 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4021
4022 /* Initialize context mapping and zero out the quick contexts. The
4023 * context block must have already been enabled. */
641bdcd5
MC
4024 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4025 rc = bnx2_init_5709_context(bp);
4026 if (rc)
4027 return rc;
4028 } else
59b47d8a 4029 bnx2_init_context(bp);
b6016b76 4030
fba9fe91
MC
4031 if ((rc = bnx2_init_cpus(bp)) != 0)
4032 return rc;
4033
b6016b76
MC
4034 bnx2_init_nvram(bp);
4035
4036 bnx2_set_mac_addr(bp);
4037
4038 val = REG_RD(bp, BNX2_MQ_CONFIG);
4039 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4040 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
68c9f75a
MC
4041 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4042 val |= BNX2_MQ_CONFIG_HALT_DIS;
4043
b6016b76
MC
4044 REG_WR(bp, BNX2_MQ_CONFIG, val);
4045
4046 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4047 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4048 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4049
4050 val = (BCM_PAGE_BITS - 8) << 24;
4051 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4052
4053 /* Configure page size. */
4054 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4055 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4056 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4057 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4058
4059 val = bp->mac_addr[0] +
4060 (bp->mac_addr[1] << 8) +
4061 (bp->mac_addr[2] << 16) +
4062 bp->mac_addr[3] +
4063 (bp->mac_addr[4] << 8) +
4064 (bp->mac_addr[5] << 16);
4065 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4066
4067 /* Program the MTU. Also include 4 bytes for CRC32. */
4068 val = bp->dev->mtu + ETH_HLEN + 4;
4069 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4070 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4071 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4072
4073 bp->last_status_idx = 0;
4074 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4075
4076 /* Set up how to generate a link change interrupt. */
4077 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4078
4079 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4080 (u64) bp->status_blk_mapping & 0xffffffff);
4081 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4082
4083 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4084 (u64) bp->stats_blk_mapping & 0xffffffff);
4085 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4086 (u64) bp->stats_blk_mapping >> 32);
4087
6aa20a22 4088 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
b6016b76
MC
4089 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4090
4091 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4092 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4093
4094 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4095 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4096
4097 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4098
4099 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4100
4101 REG_WR(bp, BNX2_HC_COM_TICKS,
4102 (bp->com_ticks_int << 16) | bp->com_ticks);
4103
4104 REG_WR(bp, BNX2_HC_CMD_TICKS,
4105 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4106
02537b06
MC
4107 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4108 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4109 else
4110 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
b6016b76
MC
4111 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4112
4113 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
8e6a72c4 4114 val = BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76 4115 else {
8e6a72c4
MC
4116 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4117 BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76
MC
4118 }
4119
8e6a72c4
MC
4120 if (bp->flags & ONE_SHOT_MSI_FLAG)
4121 val |= BNX2_HC_CONFIG_ONE_SHOT;
4122
4123 REG_WR(bp, BNX2_HC_CONFIG, val);
4124
b6016b76
MC
4125 /* Clear internal stats counters. */
4126 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4127
da3e4fbe 4128 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
b6016b76 4129
e29054f9
MC
4130 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
4131 BNX2_PORT_FEATURE_ASF_ENABLED)
4132 bp->flags |= ASF_ENABLE_FLAG;
4133
b6016b76
MC
4134 /* Initialize the receive filter. */
4135 bnx2_set_rx_mode(bp->dev);
4136
0aa38df7
MC
4137 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4138 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4139 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4140 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4141 }
b090ae2b
MC
4142 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
4143 0);
b6016b76 4144
df149d70 4145 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
b6016b76
MC
4146 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4147
4148 udelay(20);
4149
bf5295bb
MC
4150 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4151
b090ae2b 4152 return rc;
b6016b76
MC
4153}
4154
59b47d8a
MC
4155static void
4156bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4157{
4158 u32 val, offset0, offset1, offset2, offset3;
4159
4160 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4161 offset0 = BNX2_L2CTX_TYPE_XI;
4162 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4163 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4164 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4165 } else {
4166 offset0 = BNX2_L2CTX_TYPE;
4167 offset1 = BNX2_L2CTX_CMD_TYPE;
4168 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4169 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4170 }
4171 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4172 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4173
4174 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4175 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4176
4177 val = (u64) bp->tx_desc_mapping >> 32;
4178 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4179
4180 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4181 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4182}
b6016b76
MC
4183
4184static void
4185bnx2_init_tx_ring(struct bnx2 *bp)
4186{
4187 struct tx_bd *txbd;
59b47d8a 4188 u32 cid;
b6016b76 4189
2f8af120
MC
4190 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4191
b6016b76 4192 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4193
b6016b76
MC
4194 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4195 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4196
4197 bp->tx_prod = 0;
4198 bp->tx_cons = 0;
f4e418f7 4199 bp->hw_tx_cons = 0;
b6016b76 4200 bp->tx_prod_bseq = 0;
6aa20a22 4201
59b47d8a
MC
4202 cid = TX_CID;
4203 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4204 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4205
59b47d8a 4206 bnx2_init_tx_context(bp, cid);
b6016b76
MC
4207}
4208
4209static void
4210bnx2_init_rx_ring(struct bnx2 *bp)
4211{
4212 struct rx_bd *rxbd;
4213 int i;
6aa20a22 4214 u16 prod, ring_prod;
b6016b76
MC
4215 u32 val;
4216
4217 /* 8 for CRC and VLAN */
4218 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
59b47d8a
MC
4219 /* hw alignment */
4220 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
b6016b76
MC
4221
4222 ring_prod = prod = bp->rx_prod = 0;
4223 bp->rx_cons = 0;
f4e418f7 4224 bp->hw_rx_cons = 0;
b6016b76 4225 bp->rx_prod_bseq = 0;
6aa20a22 4226
13daffa2
MC
4227 for (i = 0; i < bp->rx_max_ring; i++) {
4228 int j;
b6016b76 4229
13daffa2
MC
4230 rxbd = &bp->rx_desc_ring[i][0];
4231 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
4232 rxbd->rx_bd_len = bp->rx_buf_use_size;
4233 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4234 }
4235 if (i == (bp->rx_max_ring - 1))
4236 j = 0;
4237 else
4238 j = i + 1;
4239 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
4240 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
4241 0xffffffff;
4242 }
b6016b76
MC
4243
4244 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4245 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4246 val |= 0x02 << 8;
4247 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
4248
13daffa2 4249 val = (u64) bp->rx_desc_mapping[0] >> 32;
b6016b76
MC
4250 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
4251
13daffa2 4252 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
b6016b76
MC
4253 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
4254
236b6394 4255 for (i = 0; i < bp->rx_ring_size; i++) {
b6016b76
MC
4256 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4257 break;
4258 }
4259 prod = NEXT_RX_BD(prod);
4260 ring_prod = RX_RING_IDX(prod);
4261 }
4262 bp->rx_prod = prod;
4263
4264 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4265
4266 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4267}
4268
13daffa2
MC
4269static void
4270bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4271{
4272 u32 num_rings, max;
4273
4274 bp->rx_ring_size = size;
4275 num_rings = 1;
4276 while (size > MAX_RX_DESC_CNT) {
4277 size -= MAX_RX_DESC_CNT;
4278 num_rings++;
4279 }
4280 /* round to next power of 2 */
4281 max = MAX_RX_RINGS;
4282 while ((max & num_rings) == 0)
4283 max >>= 1;
4284
4285 if (num_rings != max)
4286 max <<= 1;
4287
4288 bp->rx_max_ring = max;
4289 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4290}
4291
b6016b76
MC
4292static void
4293bnx2_free_tx_skbs(struct bnx2 *bp)
4294{
4295 int i;
4296
4297 if (bp->tx_buf_ring == NULL)
4298 return;
4299
4300 for (i = 0; i < TX_DESC_CNT; ) {
4301 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4302 struct sk_buff *skb = tx_buf->skb;
4303 int j, last;
4304
4305 if (skb == NULL) {
4306 i++;
4307 continue;
4308 }
4309
4310 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4311 skb_headlen(skb), PCI_DMA_TODEVICE);
4312
4313 tx_buf->skb = NULL;
4314
4315 last = skb_shinfo(skb)->nr_frags;
4316 for (j = 0; j < last; j++) {
4317 tx_buf = &bp->tx_buf_ring[i + j + 1];
4318 pci_unmap_page(bp->pdev,
4319 pci_unmap_addr(tx_buf, mapping),
4320 skb_shinfo(skb)->frags[j].size,
4321 PCI_DMA_TODEVICE);
4322 }
745720e5 4323 dev_kfree_skb(skb);
b6016b76
MC
4324 i += j + 1;
4325 }
4326
4327}
4328
4329static void
4330bnx2_free_rx_skbs(struct bnx2 *bp)
4331{
4332 int i;
4333
4334 if (bp->rx_buf_ring == NULL)
4335 return;
4336
13daffa2 4337 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4338 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4339 struct sk_buff *skb = rx_buf->skb;
4340
05d0f1cf 4341 if (skb == NULL)
b6016b76
MC
4342 continue;
4343
4344 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4345 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4346
4347 rx_buf->skb = NULL;
4348
745720e5 4349 dev_kfree_skb(skb);
b6016b76
MC
4350 }
4351}
4352
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	/* Reclaim every skb still owned by the TX and RX rings. */
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4359
4360static int
4361bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4362{
4363 int rc;
4364
4365 rc = bnx2_reset_chip(bp, reset_code);
4366 bnx2_free_skbs(bp);
4367 if (rc)
4368 return rc;
4369
fba9fe91
MC
4370 if ((rc = bnx2_init_chip(bp)) != 0)
4371 return rc;
4372
b6016b76
MC
4373 bnx2_init_tx_ring(bp);
4374 bnx2_init_rx_ring(bp);
4375 return 0;
4376}
4377
4378static int
4379bnx2_init_nic(struct bnx2 *bp)
4380{
4381 int rc;
4382
4383 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4384 return rc;
4385
80be4434 4386 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4387 bnx2_init_phy(bp);
4388 bnx2_set_link(bp);
0d8a6571 4389 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4390 return 0;
4391}
4392
/* Register self-test (ethtool offline diagnostic).
 *
 * For every entry in reg_tbl the register at 'offset' is probed:
 * 0 and then 0xffffffff are written, and each read-back must show the
 * read/write bits (rw_mask) taking the written value while the
 * read-only bits (ro_mask) keep their original contents.  The saved
 * value is restored whether the probe passes or fails.
 *
 * Entries flagged BNX2_FL_NOT_5709 cover registers that do not exist
 * (or differ) on the 5709 and are skipped on that chip.
 *
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16 offset;
		u16 flags;
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;	/* bits expected to accept writes */
		u32 ro_mask;	/* bits expected to be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* offset 0xffff terminates the table */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* after writing 0, the writable bits must read as 0 ... */
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* ... and the read-only bits must be unchanged */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		/* after writing all-ones, the writable bits must all stick */
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* probe passed: restore the original register contents */
		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* restore even on failure so the chip is left consistent */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
4563
4564static int
4565bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4566{
f71e1309 4567 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4568 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4569 int i;
4570
4571 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4572 u32 offset;
4573
4574 for (offset = 0; offset < size; offset += 4) {
4575
4576 REG_WR_IND(bp, start + offset, test_pattern[i]);
4577
4578 if (REG_RD_IND(bp, start + offset) !=
4579 test_pattern[i]) {
4580 return -ENODEV;
4581 }
4582 }
4583 }
4584 return 0;
4585}
4586
4587static int
4588bnx2_test_memory(struct bnx2 *bp)
4589{
4590 int ret = 0;
4591 int i;
5bae30c9 4592 static struct mem_entry {
b6016b76
MC
4593 u32 offset;
4594 u32 len;
5bae30c9 4595 } mem_tbl_5706[] = {
b6016b76 4596 { 0x60000, 0x4000 },
5b0c76ad 4597 { 0xa0000, 0x3000 },
b6016b76
MC
4598 { 0xe0000, 0x4000 },
4599 { 0x120000, 0x4000 },
4600 { 0x1a0000, 0x4000 },
4601 { 0x160000, 0x4000 },
4602 { 0xffffffff, 0 },
5bae30c9
MC
4603 },
4604 mem_tbl_5709[] = {
4605 { 0x60000, 0x4000 },
4606 { 0xa0000, 0x3000 },
4607 { 0xe0000, 0x4000 },
4608 { 0x120000, 0x4000 },
4609 { 0x1a0000, 0x4000 },
4610 { 0xffffffff, 0 },
b6016b76 4611 };
5bae30c9
MC
4612 struct mem_entry *mem_tbl;
4613
4614 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4615 mem_tbl = mem_tbl_5709;
4616 else
4617 mem_tbl = mem_tbl_5706;
b6016b76
MC
4618
4619 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4620 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4621 mem_tbl[i].len)) != 0) {
4622 return ret;
4623 }
4624 }
6aa20a22 4625
b6016b76
MC
4626 return ret;
4627}
4628
/* Selector values for bnx2_run_loopback(): loop the test frame back
 * inside the MAC or at the PHY.
 */
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Transmit one self-addressed test frame through the selected
 * loopback path (MAC-internal or PHY-internal) and verify that it
 * arrives back on the RX ring intact: TX consumed, exactly one RX
 * completion, no frame-header error bits, correct length, and a
 * byte-exact payload.
 *
 * Must be called with the NIC freshly reset/initialized (the ring
 * producer/consumer indices are assumed quiescent).
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the
 * test SKB cannot be allocated, -ENODEV on any verification failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a full-sized frame addressed to ourselves: our MAC as
	 * destination, zeroed source/type, then a counting byte pattern
	 * that the receive side can verify.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalesce (without an interrupt) so the status block
	 * reflects the current RX consumer index before we transmit.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Hand the frame to the chip with a single START|END descriptor
	 * and ring the TX doorbell registers.
	 */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back, then force another
	 * status-block update to observe the result.
	 */
	udelay(100);

	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The chip must have consumed our one TX descriptor ... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ... and produced exactly one RX completion. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The frame header the chip prepends sits at the buffer start. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Hardware length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the counting payload byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4750
/* Failure bits returned by bnx2_test_loopback(); 0 means both the
 * MAC and PHY loopback tests passed.
 */
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

4756static int
4757bnx2_test_loopback(struct bnx2 *bp)
4758{
4759 int rc = 0;
4760
4761 if (!netif_running(bp->dev))
4762 return BNX2_LOOPBACK_FAILED;
4763
4764 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4765 spin_lock_bh(&bp->phy_lock);
4766 bnx2_init_phy(bp);
4767 spin_unlock_bh(&bp->phy_lock);
4768 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4769 rc |= BNX2_MAC_LOOPBACK_FAILED;
4770 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4771 rc |= BNX2_PHY_LOOPBACK_FAILED;
4772 return rc;
4773}
4774
/* NVRAM self-test parameters: size of the manufacturing block read
 * from offset 0x100, and the expected CRC32 residual when a CRC is
 * computed over data that already includes its own CRC.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
4777
4778static int
4779bnx2_test_nvram(struct bnx2 *bp)
4780{
4781 u32 buf[NVRAM_SIZE / 4];
4782 u8 *data = (u8 *) buf;
4783 int rc = 0;
4784 u32 magic, csum;
4785
4786 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4787 goto test_nvram_done;
4788
4789 magic = be32_to_cpu(buf[0]);
4790 if (magic != 0x669955aa) {
4791 rc = -ENODEV;
4792 goto test_nvram_done;
4793 }
4794
4795 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4796 goto test_nvram_done;
4797
4798 csum = ether_crc_le(0x100, data);
4799 if (csum != CRC32_RESIDUAL) {
4800 rc = -ENODEV;
4801 goto test_nvram_done;
4802 }
4803
4804 csum = ether_crc_le(0x100, data + 0x100);
4805 if (csum != CRC32_RESIDUAL) {
4806 rc = -ENODEV;
4807 }
4808
4809test_nvram_done:
4810 return rc;
4811}
4812
/* Link self-test: sample the PHY status register under the PHY lock.
 *
 * Returns 0 when link is up, -ENODEV otherwise.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* BMSR latches link-down events: read twice so the second read
	 * reflects the current link state, not a stale latched value.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
4830
4831static int
4832bnx2_test_intr(struct bnx2 *bp)
4833{
4834 int i;
b6016b76
MC
4835 u16 status_idx;
4836
4837 if (!netif_running(bp->dev))
4838 return -ENODEV;
4839
4840 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4841
4842 /* This register is not touched during run-time. */
bf5295bb 4843 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
4844 REG_RD(bp, BNX2_HC_COMMAND);
4845
4846 for (i = 0; i < 10; i++) {
4847 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4848 status_idx) {
4849
4850 break;
4851 }
4852
4853 msleep_interruptible(10);
4854 }
4855 if (i < 10)
4856 return 0;
4857
4858 return -ENODEV;
4859}
4860
/* Periodic SerDes maintenance for the 5706 (called from bnx2_timer).
 *
 * Implements parallel detection: when autoneg is on but no link has
 * come up and the peer is not sending config, force 1000/full; when
 * link later comes up in that forced mode and the peer starts
 * sending config again, re-enable autoneg.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* still inside the grace period after (re)starting autoneg */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* 0x1c / 0x17+0x15 access vendor shadow registers —
			 * presumably signal-detect and remote-config status;
			 * TODO confirm against Broadcom PHY documentation.
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Peer is present but not autonegotiating:
				 * force 1000 Mbps full duplex.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up in forced mode: if the peer now sends config,
		 * fall back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
b6016b76 4915
/* Periodic SerDes maintenance for the 5708 (called from bnx2_timer).
 *
 * While autoneg fails to bring the link up, alternate between forced
 * 2.5G mode and autonegotiation so either kind of link partner can
 * be matched.  Skipped entirely when the PHY is managed remotely or
 * is not 2.5G capable.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* grace period after the last mode flip */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* autoneg has not linked: try forced 2.5G, with a
			 * shorter timeout before reconsidering
			 */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* forced 2.5G has not linked: back to autoneg and
			 * give it two timer ticks before flipping again
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4948
/* Periodic driver timer (runs every bp->current_interval jiffies).
 *
 * Sends the driver-alive heartbeat to the bootcode, refreshes the
 * firmware RX-drop counter, applies a 5708 statistics workaround and
 * runs the per-chip SerDes state machine.  All work is skipped while
 * interrupts are disabled via intr_sem (e.g. during reset), but the
 * timer is always re-armed while the device is up.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* intr_sem != 0 means a reset is in progress; don't touch the
	 * chip, just keep the timer alive.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4979
8e6a72c4
MC
4980static int
4981bnx2_request_irq(struct bnx2 *bp)
4982{
4983 struct net_device *dev = bp->dev;
4984 int rc = 0;
4985
4986 if (bp->flags & USING_MSI_FLAG) {
4987 irq_handler_t fn = bnx2_msi;
4988
4989 if (bp->flags & ONE_SHOT_MSI_FLAG)
4990 fn = bnx2_msi_1shot;
4991
4992 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4993 } else
4994 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4995 IRQF_SHARED, dev->name, dev);
4996 return rc;
4997}
4998
4999static void
5000bnx2_free_irq(struct bnx2 *bp)
5001{
5002 struct net_device *dev = bp->dev;
5003
5004 if (bp->flags & USING_MSI_FLAG) {
5005 free_irq(bp->pdev->irq, dev);
5006 pci_disable_msi(bp->pdev);
5007 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5008 } else
5009 free_irq(bp->pdev->irq, dev);
5010}
5011
/* Called with rtnl_lock */
/* net_device open hook: power the chip up, allocate ring memory,
 * enable MSI when available, initialize the NIC, and verify that MSI
 * interrupts are actually delivered (falling back to INTx if not)
 * before starting the TX queue.  Returns 0 or a negative errno with
 * all partially-acquired resources released.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Prefer MSI when the chip supports it and the module parameter
	 * has not disabled it; the 5709 additionally uses one-shot MSI.
	 */
	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bp->flags |= ONE_SHOT_MSI_FLAG;
		}
	}
	rc = bnx2_request_irq(bp);

	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		bnx2_free_irq(bp);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Re-init and re-request the IRQ in INTx mode
			 * (bnx2_free_irq cleared the MSI flags above).
			 */
			rc = bnx2_init_nic(bp);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
5093
/* Workqueue handler that performs the deferred chip reset scheduled
 * by bnx2_tx_timeout(): stop the netif, reinitialize the NIC and
 * restart.  intr_sem is left at 1 so the first interrupt after
 * restart re-synchronizes interrupt state.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	if (!netif_running(bp->dev))
		return;

	/* in_reset_task is polled by bnx2_close() so it can wait for
	 * this work item without flush_scheduled_work(), which could
	 * deadlock on rtnl_lock (see the comment in bnx2_close()).
	 */
	bp->in_reset_task = 1;
	bnx2_netif_stop(bp);

	bnx2_init_nic(bp);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
	bp->in_reset_task = 0;
}
5111
5112static void
5113bnx2_tx_timeout(struct net_device *dev)
5114{
972ec0d4 5115 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5116
5117 /* This allows the netif to be shutdown gracefully before resetting */
5118 schedule_work(&bp->reset_task);
5119}
5120
#ifdef BCM_VLAN
/* Called with rtnl_lock.  Install the new VLAN group and reprogram
 * the chip's RX mode; traffic is paused around the change with a
 * netif stop/start so the hardware picks it up consistently.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);
	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	bnx2_netif_start(bp);
}
#endif
5136
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* Hard-start-xmit hook: build one TX BD chain for the SKB (head plus
 * one BD per page fragment), encode checksum-offload, VLAN tag and
 * TSO/TSO6 parameters into the descriptor flags, then ring the TX
 * doorbell.  Stops the queue when the ring runs low and may wake it
 * again if space reappeared in the meantime.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* Should not happen: bnx2_tx_int() stops/wakes the queue with
	 * enough headroom for a maximally fragmented SKB.
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TSO over IPv6: the chip needs the TCP header
			 * offset (beyond the fixed IPv6 header) encoded in
			 * scattered flag/mss bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* TSO over IPv4: headers are rewritten in place, so
			 * a cloned header block must be privatized first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed the per-segment IP length and TCP pseudo-header
			 * checksum that the chip completes per segment.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			/* Tell the chip about IP and TCP option words. */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First BD: linear head of the SKB; it also owns the skb pointer
	 * used by bnx2_tx_int() for completion.
	 */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the chain. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Ring the doorbell: producer index + byte sequence. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	/* Stop the queue when headroom for a max-frag SKB is gone; the
	 * recheck closes the race with a concurrent bnx2_tx_int().
	 */
	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5275
/* Called with rtnl_lock */
/* net_device stop hook: wait out any in-flight reset work, quiesce
 * the chip with a reset code matching the configured wake-on-LAN
 * policy, release IRQ/SKBs/ring memory and drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the unload message for the bootcode based on WoL policy. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5307
/* Read a 64-bit hardware counter (split hi/lo words) from the stats
 * block.  On 64-bit hosts the two words are combined; on 32-bit
 * hosts only the low word fits in an unsigned long.  Each expansion
 * is fully parenthesized so the macros compose safely inside larger
 * expressions (the previous GET_NET_STATS64 expanded to an unguarded
 * "a + b", which mis-associates under operators like '*').
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
5320
/* net_device get_stats hook: translate the chip's hardware statistics
 * block into struct net_device_stats.  Returns the (possibly stale)
 * cached structure if the stats block has not been allocated yet.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual error classes above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 and 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames dropped by the firmware (stat_FwRxDrop is
	 * refreshed from the chip by bnx2_timer()).
	 */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5396
/* All ethtool functions called with rtnl_lock */

/* Ethtool get_settings hook: report supported/advertised modes, port
 * type and (when carrier is up) the current speed/duplex.  With a
 * remotely-managed PHY both fibre and copper are reported supported;
 * otherwise only the mode matching the configured port.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_port / advertising / line_speed are updated from the link
	 * code under the PHY lock; sample them consistently.
	 */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* no link: speed/duplex are unknown */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 5457
b6016b76
MC
5458static int
5459bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5460{
972ec0d4 5461 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5462 u8 autoneg = bp->autoneg;
5463 u8 req_duplex = bp->req_duplex;
5464 u16 req_line_speed = bp->req_line_speed;
5465 u32 advertising = bp->advertising;
7b6b8347
MC
5466 int err = -EINVAL;
5467
5468 spin_lock_bh(&bp->phy_lock);
5469
5470 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5471 goto err_out_unlock;
5472
5473 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5474 goto err_out_unlock;
b6016b76
MC
5475
5476 if (cmd->autoneg == AUTONEG_ENABLE) {
5477 autoneg |= AUTONEG_SPEED;
5478
6aa20a22 5479 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
5480
5481 /* allow advertising 1 speed */
5482 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5483 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5484 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5485 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5486
7b6b8347
MC
5487 if (cmd->port == PORT_FIBRE)
5488 goto err_out_unlock;
b6016b76
MC
5489
5490 advertising = cmd->advertising;
5491
27a005b8 5492 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
7b6b8347
MC
5493 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5494 (cmd->port == PORT_TP))
5495 goto err_out_unlock;
5496 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
b6016b76 5497 advertising = cmd->advertising;
7b6b8347
MC
5498 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5499 goto err_out_unlock;
b6016b76 5500 else {
7b6b8347 5501 if (cmd->port == PORT_FIBRE)
b6016b76 5502 advertising = ETHTOOL_ALL_FIBRE_SPEED;
7b6b8347 5503 else
b6016b76 5504 advertising = ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
5505 }
5506 advertising |= ADVERTISED_Autoneg;
5507 }
5508 else {
7b6b8347 5509 if (cmd->port == PORT_FIBRE) {
80be4434
MC
5510 if ((cmd->speed != SPEED_1000 &&
5511 cmd->speed != SPEED_2500) ||
5512 (cmd->duplex != DUPLEX_FULL))
7b6b8347 5513 goto err_out_unlock;
80be4434
MC
5514
5515 if (cmd->speed == SPEED_2500 &&
5516 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
7b6b8347 5517 goto err_out_unlock;
b6016b76 5518 }
7b6b8347
MC
5519 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5520 goto err_out_unlock;
5521
b6016b76
MC
5522 autoneg &= ~AUTONEG_SPEED;
5523 req_line_speed = cmd->speed;
5524 req_duplex = cmd->duplex;
5525 advertising = 0;
5526 }
5527
5528 bp->autoneg = autoneg;
5529 bp->advertising = advertising;
5530 bp->req_line_speed = req_line_speed;
5531 bp->req_duplex = req_duplex;
5532
7b6b8347 5533 err = bnx2_setup_phy(bp, cmd->port);
b6016b76 5534
7b6b8347 5535err_out_unlock:
c770a65c 5536 spin_unlock_bh(&bp->phy_lock);
b6016b76 5537
7b6b8347 5538 return err;
b6016b76
MC
5539}
5540
5541static void
5542bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5543{
972ec0d4 5544 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5545
5546 strcpy(info->driver, DRV_MODULE_NAME);
5547 strcpy(info->version, DRV_MODULE_VERSION);
5548 strcpy(info->bus_info, pci_name(bp->pdev));
5549 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5550 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5551 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
206cc83c
MC
5552 info->fw_version[1] = info->fw_version[3] = '.';
5553 info->fw_version[5] = 0;
b6016b76
MC
5554}
5555
244ac4f4
MC
5556#define BNX2_REGDUMP_LEN (32 * 1024)
5557
5558static int
5559bnx2_get_regs_len(struct net_device *dev)
5560{
5561 return BNX2_REGDUMP_LEN;
5562}
5563
5564static void
5565bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5566{
5567 u32 *p = _p, i, offset;
5568 u8 *orig_p = _p;
5569 struct bnx2 *bp = netdev_priv(dev);
5570 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5571 0x0800, 0x0880, 0x0c00, 0x0c10,
5572 0x0c30, 0x0d08, 0x1000, 0x101c,
5573 0x1040, 0x1048, 0x1080, 0x10a4,
5574 0x1400, 0x1490, 0x1498, 0x14f0,
5575 0x1500, 0x155c, 0x1580, 0x15dc,
5576 0x1600, 0x1658, 0x1680, 0x16d8,
5577 0x1800, 0x1820, 0x1840, 0x1854,
5578 0x1880, 0x1894, 0x1900, 0x1984,
5579 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5580 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5581 0x2000, 0x2030, 0x23c0, 0x2400,
5582 0x2800, 0x2820, 0x2830, 0x2850,
5583 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5584 0x3c00, 0x3c94, 0x4000, 0x4010,
5585 0x4080, 0x4090, 0x43c0, 0x4458,
5586 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5587 0x4fc0, 0x5010, 0x53c0, 0x5444,
5588 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5589 0x5fc0, 0x6000, 0x6400, 0x6428,
5590 0x6800, 0x6848, 0x684c, 0x6860,
5591 0x6888, 0x6910, 0x8000 };
5592
5593 regs->version = 0;
5594
5595 memset(p, 0, BNX2_REGDUMP_LEN);
5596
5597 if (!netif_running(bp->dev))
5598 return;
5599
5600 i = 0;
5601 offset = reg_boundaries[0];
5602 p += offset;
5603 while (offset < BNX2_REGDUMP_LEN) {
5604 *p++ = REG_RD(bp, offset);
5605 offset += 4;
5606 if (offset == reg_boundaries[i + 1]) {
5607 offset = reg_boundaries[i + 2];
5608 p = (u32 *) (orig_p + offset);
5609 i += 2;
5610 }
5611 }
5612}
5613
b6016b76
MC
5614static void
5615bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5616{
972ec0d4 5617 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5618
5619 if (bp->flags & NO_WOL_FLAG) {
5620 wol->supported = 0;
5621 wol->wolopts = 0;
5622 }
5623 else {
5624 wol->supported = WAKE_MAGIC;
5625 if (bp->wol)
5626 wol->wolopts = WAKE_MAGIC;
5627 else
5628 wol->wolopts = 0;
5629 }
5630 memset(&wol->sopass, 0, sizeof(wol->sopass));
5631}
5632
5633static int
5634bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5635{
972ec0d4 5636 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5637
5638 if (wol->wolopts & ~WAKE_MAGIC)
5639 return -EINVAL;
5640
5641 if (wol->wolopts & WAKE_MAGIC) {
5642 if (bp->flags & NO_WOL_FLAG)
5643 return -EINVAL;
5644
5645 bp->wol = 1;
5646 }
5647 else {
5648 bp->wol = 0;
5649 }
5650 return 0;
5651}
5652
5653static int
5654bnx2_nway_reset(struct net_device *dev)
5655{
972ec0d4 5656 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5657 u32 bmcr;
5658
5659 if (!(bp->autoneg & AUTONEG_SPEED)) {
5660 return -EINVAL;
5661 }
5662
c770a65c 5663 spin_lock_bh(&bp->phy_lock);
b6016b76 5664
7b6b8347
MC
5665 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5666 int rc;
5667
5668 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5669 spin_unlock_bh(&bp->phy_lock);
5670 return rc;
5671 }
5672
b6016b76
MC
5673 /* Force a link down visible on the other side */
5674 if (bp->phy_flags & PHY_SERDES_FLAG) {
ca58c3af 5675 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
c770a65c 5676 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5677
5678 msleep(20);
5679
c770a65c 5680 spin_lock_bh(&bp->phy_lock);
f8dd064e
MC
5681
5682 bp->current_interval = SERDES_AN_TIMEOUT;
5683 bp->serdes_an_pending = 1;
5684 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5685 }
5686
ca58c3af 5687 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5688 bmcr &= ~BMCR_LOOPBACK;
ca58c3af 5689 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
b6016b76 5690
c770a65c 5691 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5692
5693 return 0;
5694}
5695
5696static int
5697bnx2_get_eeprom_len(struct net_device *dev)
5698{
972ec0d4 5699 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5700
1122db71 5701 if (bp->flash_info == NULL)
b6016b76
MC
5702 return 0;
5703
1122db71 5704 return (int) bp->flash_size;
b6016b76
MC
5705}
5706
5707static int
5708bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5709 u8 *eebuf)
5710{
972ec0d4 5711 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5712 int rc;
5713
1064e944 5714 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
5715
5716 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5717
5718 return rc;
5719}
5720
5721static int
5722bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5723 u8 *eebuf)
5724{
972ec0d4 5725 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5726 int rc;
5727
1064e944 5728 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
5729
5730 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5731
5732 return rc;
5733}
5734
5735static int
5736bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5737{
972ec0d4 5738 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5739
5740 memset(coal, 0, sizeof(struct ethtool_coalesce));
5741
5742 coal->rx_coalesce_usecs = bp->rx_ticks;
5743 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5744 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5745 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5746
5747 coal->tx_coalesce_usecs = bp->tx_ticks;
5748 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5749 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5750 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5751
5752 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5753
5754 return 0;
5755}
5756
5757static int
5758bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5759{
972ec0d4 5760 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5761
5762 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5763 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5764
6aa20a22 5765 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
5766 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5767
5768 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5769 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5770
5771 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5772 if (bp->rx_quick_cons_trip_int > 0xff)
5773 bp->rx_quick_cons_trip_int = 0xff;
5774
5775 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5776 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5777
5778 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5779 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5780
5781 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5782 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5783
5784 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5785 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5786 0xff;
5787
5788 bp->stats_ticks = coal->stats_block_coalesce_usecs;
02537b06
MC
5789 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5790 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5791 bp->stats_ticks = USEC_PER_SEC;
5792 }
b6016b76
MC
5793 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5794 bp->stats_ticks &= 0xffff00;
5795
5796 if (netif_running(bp->dev)) {
5797 bnx2_netif_stop(bp);
5798 bnx2_init_nic(bp);
5799 bnx2_netif_start(bp);
5800 }
5801
5802 return 0;
5803}
5804
5805static void
5806bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5807{
972ec0d4 5808 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5809
13daffa2 5810 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76
MC
5811 ering->rx_mini_max_pending = 0;
5812 ering->rx_jumbo_max_pending = 0;
5813
5814 ering->rx_pending = bp->rx_ring_size;
5815 ering->rx_mini_pending = 0;
5816 ering->rx_jumbo_pending = 0;
5817
5818 ering->tx_max_pending = MAX_TX_DESC_CNT;
5819 ering->tx_pending = bp->tx_ring_size;
5820}
5821
5822static int
5823bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5824{
972ec0d4 5825 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5826
13daffa2 5827 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
b6016b76
MC
5828 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5829 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5830
5831 return -EINVAL;
5832 }
13daffa2
MC
5833 if (netif_running(bp->dev)) {
5834 bnx2_netif_stop(bp);
5835 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5836 bnx2_free_skbs(bp);
5837 bnx2_free_mem(bp);
5838 }
5839
5840 bnx2_set_rx_ring_size(bp, ering->rx_pending);
b6016b76
MC
5841 bp->tx_ring_size = ering->tx_pending;
5842
5843 if (netif_running(bp->dev)) {
13daffa2
MC
5844 int rc;
5845
5846 rc = bnx2_alloc_mem(bp);
5847 if (rc)
5848 return rc;
b6016b76
MC
5849 bnx2_init_nic(bp);
5850 bnx2_netif_start(bp);
5851 }
5852
5853 return 0;
5854}
5855
5856static void
5857bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5858{
972ec0d4 5859 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5860
5861 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5862 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5863 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5864}
5865
5866static int
5867bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5868{
972ec0d4 5869 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5870
5871 bp->req_flow_ctrl = 0;
5872 if (epause->rx_pause)
5873 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5874 if (epause->tx_pause)
5875 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5876
5877 if (epause->autoneg) {
5878 bp->autoneg |= AUTONEG_FLOW_CTRL;
5879 }
5880 else {
5881 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5882 }
5883
c770a65c 5884 spin_lock_bh(&bp->phy_lock);
b6016b76 5885
0d8a6571 5886 bnx2_setup_phy(bp, bp->phy_port);
b6016b76 5887
c770a65c 5888 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5889
5890 return 0;
5891}
5892
5893static u32
5894bnx2_get_rx_csum(struct net_device *dev)
5895{
972ec0d4 5896 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5897
5898 return bp->rx_csum;
5899}
5900
5901static int
5902bnx2_set_rx_csum(struct net_device *dev, u32 data)
5903{
972ec0d4 5904 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5905
5906 bp->rx_csum = data;
5907 return 0;
5908}
5909
b11d6213
MC
5910static int
5911bnx2_set_tso(struct net_device *dev, u32 data)
5912{
4666f87a
MC
5913 struct bnx2 *bp = netdev_priv(dev);
5914
5915 if (data) {
b11d6213 5916 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
5917 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5918 dev->features |= NETIF_F_TSO6;
5919 } else
5920 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5921 NETIF_F_TSO_ECN);
b11d6213
MC
5922 return 0;
5923}
5924
cea94db9 5925#define BNX2_NUM_STATS 46
b6016b76 5926
14ab9b86 5927static struct {
b6016b76
MC
5928 char string[ETH_GSTRING_LEN];
5929} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5930 { "rx_bytes" },
5931 { "rx_error_bytes" },
5932 { "tx_bytes" },
5933 { "tx_error_bytes" },
5934 { "rx_ucast_packets" },
5935 { "rx_mcast_packets" },
5936 { "rx_bcast_packets" },
5937 { "tx_ucast_packets" },
5938 { "tx_mcast_packets" },
5939 { "tx_bcast_packets" },
5940 { "tx_mac_errors" },
5941 { "tx_carrier_errors" },
5942 { "rx_crc_errors" },
5943 { "rx_align_errors" },
5944 { "tx_single_collisions" },
5945 { "tx_multi_collisions" },
5946 { "tx_deferred" },
5947 { "tx_excess_collisions" },
5948 { "tx_late_collisions" },
5949 { "tx_total_collisions" },
5950 { "rx_fragments" },
5951 { "rx_jabbers" },
5952 { "rx_undersize_packets" },
5953 { "rx_oversize_packets" },
5954 { "rx_64_byte_packets" },
5955 { "rx_65_to_127_byte_packets" },
5956 { "rx_128_to_255_byte_packets" },
5957 { "rx_256_to_511_byte_packets" },
5958 { "rx_512_to_1023_byte_packets" },
5959 { "rx_1024_to_1522_byte_packets" },
5960 { "rx_1523_to_9022_byte_packets" },
5961 { "tx_64_byte_packets" },
5962 { "tx_65_to_127_byte_packets" },
5963 { "tx_128_to_255_byte_packets" },
5964 { "tx_256_to_511_byte_packets" },
5965 { "tx_512_to_1023_byte_packets" },
5966 { "tx_1024_to_1522_byte_packets" },
5967 { "tx_1523_to_9022_byte_packets" },
5968 { "rx_xon_frames" },
5969 { "rx_xoff_frames" },
5970 { "tx_xon_frames" },
5971 { "tx_xoff_frames" },
5972 { "rx_mac_ctrl_frames" },
5973 { "rx_filtered_packets" },
5974 { "rx_discards" },
cea94db9 5975 { "rx_fw_discards" },
b6016b76
MC
5976};
5977
5978#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5979
f71e1309 5980static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
5981 STATS_OFFSET32(stat_IfHCInOctets_hi),
5982 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5983 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5984 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5985 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5986 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5987 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5988 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5989 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5990 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5991 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
5992 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5993 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5994 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5995 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5996 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5997 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5998 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5999 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6000 STATS_OFFSET32(stat_EtherStatsCollisions),
6001 STATS_OFFSET32(stat_EtherStatsFragments),
6002 STATS_OFFSET32(stat_EtherStatsJabbers),
6003 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6004 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6005 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6006 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6007 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6008 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6009 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6010 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6011 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6012 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6013 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6014 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6015 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6016 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6017 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6018 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6019 STATS_OFFSET32(stat_XonPauseFramesReceived),
6020 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6021 STATS_OFFSET32(stat_OutXonSent),
6022 STATS_OFFSET32(stat_OutXoffSent),
6023 STATS_OFFSET32(stat_MacControlFramesReceived),
6024 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6025 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 6026 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
6027};
6028
6029/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6030 * skipped because of errata.
6aa20a22 6031 */
14ab9b86 6032static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
6033 8,0,8,8,8,8,8,8,8,8,
6034 4,0,4,4,4,4,4,4,4,4,
6035 4,4,4,4,4,4,4,4,4,4,
6036 4,4,4,4,4,4,4,4,4,4,
cea94db9 6037 4,4,4,4,4,4,
b6016b76
MC
6038};
6039
5b0c76ad
MC
6040static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6041 8,0,8,8,8,8,8,8,8,8,
6042 4,4,4,4,4,4,4,4,4,4,
6043 4,4,4,4,4,4,4,4,4,4,
6044 4,4,4,4,4,4,4,4,4,4,
cea94db9 6045 4,4,4,4,4,4,
5b0c76ad
MC
6046};
6047
b6016b76
MC
6048#define BNX2_NUM_TESTS 6
6049
14ab9b86 6050static struct {
b6016b76
MC
6051 char string[ETH_GSTRING_LEN];
6052} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6053 { "register_test (offline)" },
6054 { "memory_test (offline)" },
6055 { "loopback_test (offline)" },
6056 { "nvram_test (online)" },
6057 { "interrupt_test (online)" },
6058 { "link_test (online)" },
6059};
6060
6061static int
6062bnx2_self_test_count(struct net_device *dev)
6063{
6064 return BNX2_NUM_TESTS;
6065}
6066
6067static void
6068bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6069{
972ec0d4 6070 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6071
6072 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6073 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
6074 int i;
6075
b6016b76
MC
6076 bnx2_netif_stop(bp);
6077 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6078 bnx2_free_skbs(bp);
6079
6080 if (bnx2_test_registers(bp) != 0) {
6081 buf[0] = 1;
6082 etest->flags |= ETH_TEST_FL_FAILED;
6083 }
6084 if (bnx2_test_memory(bp) != 0) {
6085 buf[1] = 1;
6086 etest->flags |= ETH_TEST_FL_FAILED;
6087 }
bc5a0690 6088 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 6089 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76
MC
6090
6091 if (!netif_running(bp->dev)) {
6092 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6093 }
6094 else {
6095 bnx2_init_nic(bp);
6096 bnx2_netif_start(bp);
6097 }
6098
6099 /* wait for link up */
80be4434
MC
6100 for (i = 0; i < 7; i++) {
6101 if (bp->link_up)
6102 break;
6103 msleep_interruptible(1000);
6104 }
b6016b76
MC
6105 }
6106
6107 if (bnx2_test_nvram(bp) != 0) {
6108 buf[3] = 1;
6109 etest->flags |= ETH_TEST_FL_FAILED;
6110 }
6111 if (bnx2_test_intr(bp) != 0) {
6112 buf[4] = 1;
6113 etest->flags |= ETH_TEST_FL_FAILED;
6114 }
6115
6116 if (bnx2_test_link(bp) != 0) {
6117 buf[5] = 1;
6118 etest->flags |= ETH_TEST_FL_FAILED;
6119
6120 }
6121}
6122
6123static void
6124bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6125{
6126 switch (stringset) {
6127 case ETH_SS_STATS:
6128 memcpy(buf, bnx2_stats_str_arr,
6129 sizeof(bnx2_stats_str_arr));
6130 break;
6131 case ETH_SS_TEST:
6132 memcpy(buf, bnx2_tests_str_arr,
6133 sizeof(bnx2_tests_str_arr));
6134 break;
6135 }
6136}
6137
6138static int
6139bnx2_get_stats_count(struct net_device *dev)
6140{
6141 return BNX2_NUM_STATS;
6142}
6143
6144static void
6145bnx2_get_ethtool_stats(struct net_device *dev,
6146 struct ethtool_stats *stats, u64 *buf)
6147{
972ec0d4 6148 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6149 int i;
6150 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 6151 u8 *stats_len_arr = NULL;
b6016b76
MC
6152
6153 if (hw_stats == NULL) {
6154 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6155 return;
6156 }
6157
5b0c76ad
MC
6158 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6159 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6160 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6161 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 6162 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
6163 else
6164 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
6165
6166 for (i = 0; i < BNX2_NUM_STATS; i++) {
6167 if (stats_len_arr[i] == 0) {
6168 /* skip this counter */
6169 buf[i] = 0;
6170 continue;
6171 }
6172 if (stats_len_arr[i] == 4) {
6173 /* 4-byte counter */
6174 buf[i] = (u64)
6175 *(hw_stats + bnx2_stats_offset_arr[i]);
6176 continue;
6177 }
6178 /* 8-byte counter */
6179 buf[i] = (((u64) *(hw_stats +
6180 bnx2_stats_offset_arr[i])) << 32) +
6181 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6182 }
6183}
6184
6185static int
6186bnx2_phys_id(struct net_device *dev, u32 data)
6187{
972ec0d4 6188 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6189 int i;
6190 u32 save;
6191
6192 if (data == 0)
6193 data = 2;
6194
6195 save = REG_RD(bp, BNX2_MISC_CFG);
6196 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6197
6198 for (i = 0; i < (data * 2); i++) {
6199 if ((i % 2) == 0) {
6200 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6201 }
6202 else {
6203 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6204 BNX2_EMAC_LED_1000MB_OVERRIDE |
6205 BNX2_EMAC_LED_100MB_OVERRIDE |
6206 BNX2_EMAC_LED_10MB_OVERRIDE |
6207 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6208 BNX2_EMAC_LED_TRAFFIC);
6209 }
6210 msleep_interruptible(500);
6211 if (signal_pending(current))
6212 break;
6213 }
6214 REG_WR(bp, BNX2_EMAC_LED, 0);
6215 REG_WR(bp, BNX2_MISC_CFG, save);
6216 return 0;
6217}
6218
4666f87a
MC
6219static int
6220bnx2_set_tx_csum(struct net_device *dev, u32 data)
6221{
6222 struct bnx2 *bp = netdev_priv(dev);
6223
6224 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6225 return (ethtool_op_set_tx_hw_csum(dev, data));
6226 else
6227 return (ethtool_op_set_tx_csum(dev, data));
6228}
6229
7282d491 6230static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
6231 .get_settings = bnx2_get_settings,
6232 .set_settings = bnx2_set_settings,
6233 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
6234 .get_regs_len = bnx2_get_regs_len,
6235 .get_regs = bnx2_get_regs,
b6016b76
MC
6236 .get_wol = bnx2_get_wol,
6237 .set_wol = bnx2_set_wol,
6238 .nway_reset = bnx2_nway_reset,
6239 .get_link = ethtool_op_get_link,
6240 .get_eeprom_len = bnx2_get_eeprom_len,
6241 .get_eeprom = bnx2_get_eeprom,
6242 .set_eeprom = bnx2_set_eeprom,
6243 .get_coalesce = bnx2_get_coalesce,
6244 .set_coalesce = bnx2_set_coalesce,
6245 .get_ringparam = bnx2_get_ringparam,
6246 .set_ringparam = bnx2_set_ringparam,
6247 .get_pauseparam = bnx2_get_pauseparam,
6248 .set_pauseparam = bnx2_set_pauseparam,
6249 .get_rx_csum = bnx2_get_rx_csum,
6250 .set_rx_csum = bnx2_set_rx_csum,
6251 .get_tx_csum = ethtool_op_get_tx_csum,
4666f87a 6252 .set_tx_csum = bnx2_set_tx_csum,
b6016b76
MC
6253 .get_sg = ethtool_op_get_sg,
6254 .set_sg = ethtool_op_set_sg,
b6016b76 6255 .get_tso = ethtool_op_get_tso,
b11d6213 6256 .set_tso = bnx2_set_tso,
b6016b76
MC
6257 .self_test_count = bnx2_self_test_count,
6258 .self_test = bnx2_self_test,
6259 .get_strings = bnx2_get_strings,
6260 .phys_id = bnx2_phys_id,
6261 .get_stats_count = bnx2_get_stats_count,
6262 .get_ethtool_stats = bnx2_get_ethtool_stats,
24b8e05d 6263 .get_perm_addr = ethtool_op_get_perm_addr,
b6016b76
MC
6264};
6265
6266/* Called with rtnl_lock */
6267static int
6268bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6269{
14ab9b86 6270 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 6271 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6272 int err;
6273
6274 switch(cmd) {
6275 case SIOCGMIIPHY:
6276 data->phy_id = bp->phy_addr;
6277
6278 /* fallthru */
6279 case SIOCGMIIREG: {
6280 u32 mii_regval;
6281
7b6b8347
MC
6282 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6283 return -EOPNOTSUPP;
6284
dad3e452
MC
6285 if (!netif_running(dev))
6286 return -EAGAIN;
6287
c770a65c 6288 spin_lock_bh(&bp->phy_lock);
b6016b76 6289 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 6290 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6291
6292 data->val_out = mii_regval;
6293
6294 return err;
6295 }
6296
6297 case SIOCSMIIREG:
6298 if (!capable(CAP_NET_ADMIN))
6299 return -EPERM;
6300
7b6b8347
MC
6301 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6302 return -EOPNOTSUPP;
6303
dad3e452
MC
6304 if (!netif_running(dev))
6305 return -EAGAIN;
6306
c770a65c 6307 spin_lock_bh(&bp->phy_lock);
b6016b76 6308 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 6309 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6310
6311 return err;
6312
6313 default:
6314 /* do nothing */
6315 break;
6316 }
6317 return -EOPNOTSUPP;
6318}
6319
6320/* Called with rtnl_lock */
6321static int
6322bnx2_change_mac_addr(struct net_device *dev, void *p)
6323{
6324 struct sockaddr *addr = p;
972ec0d4 6325 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6326
73eef4cd
MC
6327 if (!is_valid_ether_addr(addr->sa_data))
6328 return -EINVAL;
6329
b6016b76
MC
6330 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6331 if (netif_running(dev))
6332 bnx2_set_mac_addr(bp);
6333
6334 return 0;
6335}
6336
6337/* Called with rtnl_lock */
6338static int
6339bnx2_change_mtu(struct net_device *dev, int new_mtu)
6340{
972ec0d4 6341 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6342
6343 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6344 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6345 return -EINVAL;
6346
6347 dev->mtu = new_mtu;
6348 if (netif_running(dev)) {
6349 bnx2_netif_stop(bp);
6350
6351 bnx2_init_nic(bp);
6352
6353 bnx2_netif_start(bp);
6354 }
6355 return 0;
6356}
6357
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll controller: run the interrupt handler with the device IRQ
 * masked (used by netconsole/kgdboe).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6369
253c8b75
MC
6370static void __devinit
6371bnx2_get_5709_media(struct bnx2 *bp)
6372{
6373 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6374 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6375 u32 strap;
6376
6377 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6378 return;
6379 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6380 bp->phy_flags |= PHY_SERDES_FLAG;
6381 return;
6382 }
6383
6384 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6385 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6386 else
6387 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6388
6389 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6390 switch (strap) {
6391 case 0x4:
6392 case 0x5:
6393 case 0x6:
6394 bp->phy_flags |= PHY_SERDES_FLAG;
6395 return;
6396 }
6397 } else {
6398 switch (strap) {
6399 case 0x1:
6400 case 0x2:
6401 case 0x4:
6402 bp->phy_flags |= PHY_SERDES_FLAG;
6403 return;
6404 }
6405 }
6406}
6407
883e5151
MC
6408static void __devinit
6409bnx2_get_pci_speed(struct bnx2 *bp)
6410{
6411 u32 reg;
6412
6413 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6414 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6415 u32 clkreg;
6416
6417 bp->flags |= PCIX_FLAG;
6418
6419 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6420
6421 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6422 switch (clkreg) {
6423 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6424 bp->bus_speed_mhz = 133;
6425 break;
6426
6427 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6428 bp->bus_speed_mhz = 100;
6429 break;
6430
6431 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6432 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6433 bp->bus_speed_mhz = 66;
6434 break;
6435
6436 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6437 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6438 bp->bus_speed_mhz = 50;
6439 break;
6440
6441 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6442 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6443 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6444 bp->bus_speed_mhz = 33;
6445 break;
6446 }
6447 }
6448 else {
6449 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6450 bp->bus_speed_mhz = 66;
6451 else
6452 bp->bus_speed_mhz = 33;
6453 }
6454
6455 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6456 bp->flags |= PCI_32BIT_FLAG;
6457
6458}
6459
b6016b76
MC
6460static int __devinit
6461bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6462{
6463 struct bnx2 *bp;
6464 unsigned long mem_len;
6465 int rc;
6466 u32 reg;
40453c83 6467 u64 dma_mask, persist_dma_mask;
b6016b76
MC
6468
6469 SET_MODULE_OWNER(dev);
6470 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6471 bp = netdev_priv(dev);
b6016b76
MC
6472
6473 bp->flags = 0;
6474 bp->phy_flags = 0;
6475
6476 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6477 rc = pci_enable_device(pdev);
6478 if (rc) {
9b91cf9d 6479 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
b6016b76
MC
6480 goto err_out;
6481 }
6482
6483 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6484 dev_err(&pdev->dev,
2e8a538d 6485 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6486 rc = -ENODEV;
6487 goto err_out_disable;
6488 }
6489
6490 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6491 if (rc) {
9b91cf9d 6492 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6493 goto err_out_disable;
6494 }
6495
6496 pci_set_master(pdev);
6497
6498 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6499 if (bp->pm_cap == 0) {
9b91cf9d 6500 dev_err(&pdev->dev,
2e8a538d 6501 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6502 rc = -EIO;
6503 goto err_out_release;
6504 }
6505
b6016b76
MC
6506 bp->dev = dev;
6507 bp->pdev = pdev;
6508
6509 spin_lock_init(&bp->phy_lock);
1b8227c4 6510 spin_lock_init(&bp->indirect_lock);
c4028958 6511 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6512
6513 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6514 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6515 dev->mem_end = dev->mem_start + mem_len;
6516 dev->irq = pdev->irq;
6517
6518 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6519
6520 if (!bp->regview) {
9b91cf9d 6521 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6522 rc = -ENOMEM;
6523 goto err_out_release;
6524 }
6525
6526 /* Configure byte swap and enable write to the reg_window registers.
6527 * Rely on CPU to do target byte swapping on big endian systems
6528 * The chip's target access swapping will not swap all accesses
6529 */
6530 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6531 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6532 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6533
829ca9a3 6534 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6535
6536 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6537
883e5151
MC
6538 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6539 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6540 dev_err(&pdev->dev,
6541 "Cannot find PCIE capability, aborting.\n");
6542 rc = -EIO;
6543 goto err_out_unmap;
6544 }
6545 bp->flags |= PCIE_FLAG;
6546 } else {
59b47d8a
MC
6547 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6548 if (bp->pcix_cap == 0) {
6549 dev_err(&pdev->dev,
6550 "Cannot find PCIX capability, aborting.\n");
6551 rc = -EIO;
6552 goto err_out_unmap;
6553 }
6554 }
6555
8e6a72c4
MC
6556 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6557 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6558 bp->flags |= MSI_CAP_FLAG;
6559 }
6560
40453c83
MC
6561 /* 5708 cannot support DMA addresses > 40-bit. */
6562 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6563 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6564 else
6565 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6566
6567 /* Configure DMA attributes. */
6568 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6569 dev->features |= NETIF_F_HIGHDMA;
6570 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6571 if (rc) {
6572 dev_err(&pdev->dev,
6573 "pci_set_consistent_dma_mask failed, aborting.\n");
6574 goto err_out_unmap;
6575 }
6576 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6577 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6578 goto err_out_unmap;
6579 }
6580
883e5151
MC
6581 if (!(bp->flags & PCIE_FLAG))
6582 bnx2_get_pci_speed(bp);
b6016b76
MC
6583
6584 /* 5706A0 may falsely detect SERR and PERR. */
6585 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6586 reg = REG_RD(bp, PCI_COMMAND);
6587 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6588 REG_WR(bp, PCI_COMMAND, reg);
6589 }
6590 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6591 !(bp->flags & PCIX_FLAG)) {
6592
9b91cf9d 6593 dev_err(&pdev->dev,
2e8a538d 6594 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6595 goto err_out_unmap;
6596 }
6597
6598 bnx2_init_nvram(bp);
6599
e3648b3d
MC
6600 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6601
6602 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6603 BNX2_SHM_HDR_SIGNATURE_SIG) {
6604 u32 off = PCI_FUNC(pdev->devfn) << 2;
6605
6606 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6607 } else
e3648b3d
MC
6608 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6609
b6016b76
MC
6610 /* Get the permanent MAC address. First we need to make sure the
6611 * firmware is actually running.
6612 */
e3648b3d 6613 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6614
6615 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6616 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6617 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6618 rc = -ENODEV;
6619 goto err_out_unmap;
6620 }
6621
e3648b3d 6622 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
b6016b76 6623
e3648b3d 6624 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6625 bp->mac_addr[0] = (u8) (reg >> 8);
6626 bp->mac_addr[1] = (u8) reg;
6627
e3648b3d 6628 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6629 bp->mac_addr[2] = (u8) (reg >> 24);
6630 bp->mac_addr[3] = (u8) (reg >> 16);
6631 bp->mac_addr[4] = (u8) (reg >> 8);
6632 bp->mac_addr[5] = (u8) reg;
6633
6634 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6635 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6636
6637 bp->rx_csum = 1;
6638
6639 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6640
6641 bp->tx_quick_cons_trip_int = 20;
6642 bp->tx_quick_cons_trip = 20;
6643 bp->tx_ticks_int = 80;
6644 bp->tx_ticks = 80;
6aa20a22 6645
b6016b76
MC
6646 bp->rx_quick_cons_trip_int = 6;
6647 bp->rx_quick_cons_trip = 6;
6648 bp->rx_ticks_int = 18;
6649 bp->rx_ticks = 18;
6650
6651 bp->stats_ticks = 1000000 & 0xffff00;
6652
6653 bp->timer_interval = HZ;
cd339a0e 6654 bp->current_interval = HZ;
b6016b76 6655
5b0c76ad
MC
6656 bp->phy_addr = 1;
6657
b6016b76 6658 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6659 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6660 bnx2_get_5709_media(bp);
6661 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6662 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6663
0d8a6571 6664 bp->phy_port = PORT_TP;
bac0dff6 6665 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6666 bp->phy_port = PORT_FIBRE;
b6016b76 6667 bp->flags |= NO_WOL_FLAG;
bac0dff6 6668 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 6669 bp->phy_addr = 2;
e3648b3d 6670 reg = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
6671 BNX2_SHARED_HW_CFG_CONFIG);
6672 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6673 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6674 }
0d8a6571
MC
6675 bnx2_init_remote_phy(bp);
6676
261dd5ca
MC
6677 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6678 CHIP_NUM(bp) == CHIP_NUM_5708)
6679 bp->phy_flags |= PHY_CRC_FIX_FLAG;
b659f44e
MC
6680 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6681 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 6682
16088272
MC
6683 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6684 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6685 (CHIP_ID(bp) == CHIP_ID_5708_B1))
dda1e390
MC
6686 bp->flags |= NO_WOL_FLAG;
6687
b6016b76
MC
6688 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6689 bp->tx_quick_cons_trip_int =
6690 bp->tx_quick_cons_trip;
6691 bp->tx_ticks_int = bp->tx_ticks;
6692 bp->rx_quick_cons_trip_int =
6693 bp->rx_quick_cons_trip;
6694 bp->rx_ticks_int = bp->rx_ticks;
6695 bp->comp_prod_trip_int = bp->comp_prod_trip;
6696 bp->com_ticks_int = bp->com_ticks;
6697 bp->cmd_ticks_int = bp->cmd_ticks;
6698 }
6699
f9317a40
MC
6700 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6701 *
6702 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6703 * with byte enables disabled on the unused 32-bit word. This is legal
6704 * but causes problems on the AMD 8132 which will eventually stop
6705 * responding after a while.
6706 *
6707 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 6708 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
6709 */
6710 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6711 struct pci_dev *amd_8132 = NULL;
6712
6713 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6714 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6715 amd_8132))) {
6716 u8 rev;
6717
6718 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6719 if (rev >= 0x10 && rev <= 0x13) {
6720 disable_msi = 1;
6721 pci_dev_put(amd_8132);
6722 break;
6723 }
6724 }
6725 }
6726
deaf391b 6727 bnx2_set_default_link(bp);
b6016b76
MC
6728 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6729
cd339a0e
MC
6730 init_timer(&bp->timer);
6731 bp->timer.expires = RUN_AT(bp->timer_interval);
6732 bp->timer.data = (unsigned long) bp;
6733 bp->timer.function = bnx2_timer;
6734
b6016b76
MC
6735 return 0;
6736
6737err_out_unmap:
6738 if (bp->regview) {
6739 iounmap(bp->regview);
73eef4cd 6740 bp->regview = NULL;
b6016b76
MC
6741 }
6742
6743err_out_release:
6744 pci_release_regions(pdev);
6745
6746err_out_disable:
6747 pci_disable_device(pdev);
6748 pci_set_drvdata(pdev, NULL);
6749
6750err_out:
6751 return rc;
6752}
6753
883e5151
MC
6754static char * __devinit
6755bnx2_bus_string(struct bnx2 *bp, char *str)
6756{
6757 char *s = str;
6758
6759 if (bp->flags & PCIE_FLAG) {
6760 s += sprintf(s, "PCI Express");
6761 } else {
6762 s += sprintf(s, "PCI");
6763 if (bp->flags & PCIX_FLAG)
6764 s += sprintf(s, "-X");
6765 if (bp->flags & PCI_32BIT_FLAG)
6766 s += sprintf(s, " 32-bit");
6767 else
6768 s += sprintf(s, " 64-bit");
6769 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6770 }
6771 return str;
6772}
6773
b6016b76
MC
/* PCI probe handler: allocate the net_device, initialize the board via
 * bnx2_init_board(), fill in the netdev operations and feature flags,
 * and register the interface with the networking core.
 * Returns 0 on success or a negative errno; all partially-acquired
 * resources are released on failure.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;
	char str[40];	/* scratch buffer for bnx2_bus_string() */

	/* Print the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev and its bnx2 private area are zeroed by the allocator */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	/* netdev entry points */
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;	/* NAPI poll weight */

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif

	pci_set_drvdata(pdev, dev);

	/* bp->mac_addr was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* IPv6 checksum offload is only enabled on the 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		/* Unwind everything bnx2_init_board() acquired. */
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',	/* chip rev letter */
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq);

	/* Continue the banner line with the station MAC address. */
	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	return 0;
}
6866
/* PCI hot-unplug / driver unload handler: tear down everything
 * bnx2_init_one() set up, in reverse order of acquisition.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure a pending reset_task (queued from the timer/irq
	 * paths) cannot run after the device is gone. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6885
/* PM suspend handler.  Quiesces the NIC, tells the boot-code firmware
 * why the driver is going away (so it can arm wake-on-LAN when
 * configured), and drops the device into the PCI power state chosen
 * by the PM core.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Nothing to quiesce if the interface was never brought up. */
	if (!netif_running(dev))
		return 0;

	/* Let any queued reset_task finish before stopping the NIC. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload reason: link-down when WoL is not
	 * possible on this board, WoL/no-WoL suspend otherwise. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	pci_save_state(pdev);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6912
/* PM resume handler: restore PCI config space, return the chip to full
 * power, and reinitialize the NIC from scratch — bnx2_suspend() reset
 * the chip and freed all rx/tx skbs, so a full re-init is required.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* If the interface was down at suspend, there is nothing to do. */
	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6929
/* PCI driver glue: probe/remove plus legacy suspend/resume callbacks. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6938
6939static int __init bnx2_init(void)
6940{
29917620 6941 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
6942}
6943
/* Module exit point: detach the driver from the PCI subsystem. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6948
/* Register the module load/unload handlers with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6951
6952
6953
This page took 0.69206 seconds and 5 git commands to generate.