/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
/* Pull in VLAN support only when the kernel provides HW VLAN TX offload.
 * The extraction dropped the matching #endif, which broke preprocessing;
 * restored here (BCM_VLAN gates the driver's VLAN paths, as upstream).
 */
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.5.11"
58 #define DRV_MODULE_RELDATE "June 4, 2007"
60 #define RUN_AT(x) (jiffies + (x))
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
65 static const char version
[] __devinitdata
=
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME
" v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION
);
73 static int disable_msi
= 0;
75 module_param(disable_msi
, int, 0);
76 MODULE_PARM_DESC(disable_msi
, "Disable Message Signaled Interrupt (MSI)");
/* Human-readable board names, indexed by board_t, above.
 * The extraction dropped the struct opening and the closing brace;
 * restored to match upstream.
 */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
105 static struct pci_device_id bnx2_pci_tbl
[] = {
106 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
107 PCI_VENDOR_ID_HP
, 0x3101, 0, 0, NC370T
},
108 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
109 PCI_VENDOR_ID_HP
, 0x3106, 0, 0, NC370I
},
110 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
111 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5706
},
112 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5708
,
113 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5708
},
114 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706S
,
115 PCI_VENDOR_ID_HP
, 0x3102, 0, 0, NC370F
},
116 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706S
,
117 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5706S
},
118 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5708S
,
119 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5708S
},
120 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5709
,
121 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5709
},
122 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5709S
,
123 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5709S
},
127 static struct flash_spec flash_table
[] =
130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131 1, SEEPROM_PAGE_BITS
, SEEPROM_PAGE_SIZE
,
132 SEEPROM_BYTE_ADDR_MASK
, SEEPROM_TOTAL_SIZE
,
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
137 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
143 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
149 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
*4,
150 "Non-buffered flash (256kB)"},
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
154 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS
, ST_MICRO_FLASH_PAGE_SIZE
,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK
, ST_MICRO_FLASH_BASE_TOTAL_SIZE
*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS
, ST_MICRO_FLASH_PAGE_SIZE
,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK
, ST_MICRO_FLASH_BASE_TOTAL_SIZE
*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
170 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
,
171 "Non-buffered flash (64kB)"},
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS
, SEEPROM_PAGE_SIZE
,
175 SEEPROM_BYTE_ADDR_MASK
, SEEPROM_TOTAL_SIZE
,
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
180 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
185 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
190 BUFFERED_FLASH_BYTE_ADDR_MASK
, BUFFERED_FLASH_TOTAL_SIZE
,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
195 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
200 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
202 /* Ateml Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
205 BUFFERED_FLASH_BYTE_ADDR_MASK
, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
210 BUFFERED_FLASH_BYTE_ADDR_MASK
, BUFFERED_FLASH_TOTAL_SIZE
*2,
211 "Buffered flash (256kB)"},
/* Export the PCI ID table for hotplug/module autoloading. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
216 static inline u32
bnx2_tx_avail(struct bnx2
*bp
)
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
225 diff
= bp
->tx_prod
- bp
->tx_cons
;
226 if (unlikely(diff
>= TX_DESC_CNT
)) {
228 if (diff
== TX_DESC_CNT
)
229 diff
= MAX_TX_DESC_CNT
;
231 return (bp
->tx_ring_size
- diff
);
235 bnx2_reg_rd_ind(struct bnx2
*bp
, u32 offset
)
239 spin_lock_bh(&bp
->indirect_lock
);
240 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW_ADDRESS
, offset
);
241 val
= REG_RD(bp
, BNX2_PCICFG_REG_WINDOW
);
242 spin_unlock_bh(&bp
->indirect_lock
);
247 bnx2_reg_wr_ind(struct bnx2
*bp
, u32 offset
, u32 val
)
249 spin_lock_bh(&bp
->indirect_lock
);
250 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW_ADDRESS
, offset
);
251 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW
, val
);
252 spin_unlock_bh(&bp
->indirect_lock
);
256 bnx2_ctx_wr(struct bnx2
*bp
, u32 cid_addr
, u32 offset
, u32 val
)
259 spin_lock_bh(&bp
->indirect_lock
);
260 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
263 REG_WR(bp
, BNX2_CTX_CTX_DATA
, val
);
264 REG_WR(bp
, BNX2_CTX_CTX_CTRL
,
265 offset
| BNX2_CTX_CTX_CTRL_WRITE_REQ
);
266 for (i
= 0; i
< 5; i
++) {
268 val
= REG_RD(bp
, BNX2_CTX_CTX_CTRL
);
269 if ((val
& BNX2_CTX_CTX_CTRL_WRITE_REQ
) == 0)
274 REG_WR(bp
, BNX2_CTX_DATA_ADR
, offset
);
275 REG_WR(bp
, BNX2_CTX_DATA
, val
);
277 spin_unlock_bh(&bp
->indirect_lock
);
281 bnx2_read_phy(struct bnx2
*bp
, u32 reg
, u32
*val
)
286 if (bp
->phy_flags
& PHY_INT_MODE_AUTO_POLLING_FLAG
) {
287 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
288 val1
&= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
290 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
291 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
296 val1
= (bp
->phy_addr
<< 21) | (reg
<< 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ
| BNX2_EMAC_MDIO_COMM_DISEXT
|
298 BNX2_EMAC_MDIO_COMM_START_BUSY
;
299 REG_WR(bp
, BNX2_EMAC_MDIO_COMM
, val1
);
301 for (i
= 0; i
< 50; i
++) {
304 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
305 if (!(val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)) {
308 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
309 val1
&= BNX2_EMAC_MDIO_COMM_DATA
;
315 if (val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
) {
324 if (bp
->phy_flags
& PHY_INT_MODE_AUTO_POLLING_FLAG
) {
325 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
326 val1
|= BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
328 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
329 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
338 bnx2_write_phy(struct bnx2
*bp
, u32 reg
, u32 val
)
343 if (bp
->phy_flags
& PHY_INT_MODE_AUTO_POLLING_FLAG
) {
344 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
345 val1
&= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
347 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
348 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
353 val1
= (bp
->phy_addr
<< 21) | (reg
<< 16) | val
|
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE
|
355 BNX2_EMAC_MDIO_COMM_START_BUSY
| BNX2_EMAC_MDIO_COMM_DISEXT
;
356 REG_WR(bp
, BNX2_EMAC_MDIO_COMM
, val1
);
358 for (i
= 0; i
< 50; i
++) {
361 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
362 if (!(val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)) {
368 if (val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)
373 if (bp
->phy_flags
& PHY_INT_MODE_AUTO_POLLING_FLAG
) {
374 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
375 val1
|= BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
377 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
378 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
387 bnx2_disable_int(struct bnx2
*bp
)
389 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
391 REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
);
395 bnx2_enable_int(struct bnx2
*bp
)
397 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
| bp
->last_status_idx
);
401 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
| bp
->last_status_idx
);
404 REG_WR(bp
, BNX2_HC_COMMAND
, bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW
);
408 bnx2_disable_int_sync(struct bnx2
*bp
)
410 atomic_inc(&bp
->intr_sem
);
411 bnx2_disable_int(bp
);
412 synchronize_irq(bp
->pdev
->irq
);
416 bnx2_netif_stop(struct bnx2
*bp
)
418 bnx2_disable_int_sync(bp
);
419 if (netif_running(bp
->dev
)) {
420 netif_poll_disable(bp
->dev
);
421 netif_tx_disable(bp
->dev
);
422 bp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
427 bnx2_netif_start(struct bnx2
*bp
)
429 if (atomic_dec_and_test(&bp
->intr_sem
)) {
430 if (netif_running(bp
->dev
)) {
431 netif_wake_queue(bp
->dev
);
432 netif_poll_enable(bp
->dev
);
439 bnx2_free_mem(struct bnx2
*bp
)
443 for (i
= 0; i
< bp
->ctx_pages
; i
++) {
444 if (bp
->ctx_blk
[i
]) {
445 pci_free_consistent(bp
->pdev
, BCM_PAGE_SIZE
,
447 bp
->ctx_blk_mapping
[i
]);
448 bp
->ctx_blk
[i
] = NULL
;
451 if (bp
->status_blk
) {
452 pci_free_consistent(bp
->pdev
, bp
->status_stats_size
,
453 bp
->status_blk
, bp
->status_blk_mapping
);
454 bp
->status_blk
= NULL
;
455 bp
->stats_blk
= NULL
;
457 if (bp
->tx_desc_ring
) {
458 pci_free_consistent(bp
->pdev
,
459 sizeof(struct tx_bd
) * TX_DESC_CNT
,
460 bp
->tx_desc_ring
, bp
->tx_desc_mapping
);
461 bp
->tx_desc_ring
= NULL
;
463 kfree(bp
->tx_buf_ring
);
464 bp
->tx_buf_ring
= NULL
;
465 for (i
= 0; i
< bp
->rx_max_ring
; i
++) {
466 if (bp
->rx_desc_ring
[i
])
467 pci_free_consistent(bp
->pdev
,
468 sizeof(struct rx_bd
) * RX_DESC_CNT
,
470 bp
->rx_desc_mapping
[i
]);
471 bp
->rx_desc_ring
[i
] = NULL
;
473 vfree(bp
->rx_buf_ring
);
474 bp
->rx_buf_ring
= NULL
;
478 bnx2_alloc_mem(struct bnx2
*bp
)
480 int i
, status_blk_size
;
482 bp
->tx_buf_ring
= kzalloc(sizeof(struct sw_bd
) * TX_DESC_CNT
,
484 if (bp
->tx_buf_ring
== NULL
)
487 bp
->tx_desc_ring
= pci_alloc_consistent(bp
->pdev
,
488 sizeof(struct tx_bd
) *
490 &bp
->tx_desc_mapping
);
491 if (bp
->tx_desc_ring
== NULL
)
494 bp
->rx_buf_ring
= vmalloc(sizeof(struct sw_bd
) * RX_DESC_CNT
*
496 if (bp
->rx_buf_ring
== NULL
)
499 memset(bp
->rx_buf_ring
, 0, sizeof(struct sw_bd
) * RX_DESC_CNT
*
502 for (i
= 0; i
< bp
->rx_max_ring
; i
++) {
503 bp
->rx_desc_ring
[i
] =
504 pci_alloc_consistent(bp
->pdev
,
505 sizeof(struct rx_bd
) * RX_DESC_CNT
,
506 &bp
->rx_desc_mapping
[i
]);
507 if (bp
->rx_desc_ring
[i
] == NULL
)
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size
= L1_CACHE_ALIGN(sizeof(struct status_block
));
514 bp
->status_stats_size
= status_blk_size
+
515 sizeof(struct statistics_block
);
517 bp
->status_blk
= pci_alloc_consistent(bp
->pdev
, bp
->status_stats_size
,
518 &bp
->status_blk_mapping
);
519 if (bp
->status_blk
== NULL
)
522 memset(bp
->status_blk
, 0, bp
->status_stats_size
);
524 bp
->stats_blk
= (void *) ((unsigned long) bp
->status_blk
+
527 bp
->stats_blk_mapping
= bp
->status_blk_mapping
+ status_blk_size
;
529 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
530 bp
->ctx_pages
= 0x2000 / BCM_PAGE_SIZE
;
531 if (bp
->ctx_pages
== 0)
533 for (i
= 0; i
< bp
->ctx_pages
; i
++) {
534 bp
->ctx_blk
[i
] = pci_alloc_consistent(bp
->pdev
,
536 &bp
->ctx_blk_mapping
[i
]);
537 if (bp
->ctx_blk
[i
] == NULL
)
549 bnx2_report_fw_link(struct bnx2
*bp
)
551 u32 fw_link_status
= 0;
556 switch (bp
->line_speed
) {
558 if (bp
->duplex
== DUPLEX_HALF
)
559 fw_link_status
= BNX2_LINK_STATUS_10HALF
;
561 fw_link_status
= BNX2_LINK_STATUS_10FULL
;
564 if (bp
->duplex
== DUPLEX_HALF
)
565 fw_link_status
= BNX2_LINK_STATUS_100HALF
;
567 fw_link_status
= BNX2_LINK_STATUS_100FULL
;
570 if (bp
->duplex
== DUPLEX_HALF
)
571 fw_link_status
= BNX2_LINK_STATUS_1000HALF
;
573 fw_link_status
= BNX2_LINK_STATUS_1000FULL
;
576 if (bp
->duplex
== DUPLEX_HALF
)
577 fw_link_status
= BNX2_LINK_STATUS_2500HALF
;
579 fw_link_status
= BNX2_LINK_STATUS_2500FULL
;
583 fw_link_status
|= BNX2_LINK_STATUS_LINK_UP
;
586 fw_link_status
|= BNX2_LINK_STATUS_AN_ENABLED
;
588 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
589 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
591 if (!(bmsr
& BMSR_ANEGCOMPLETE
) ||
592 bp
->phy_flags
& PHY_PARALLEL_DETECT_FLAG
)
593 fw_link_status
|= BNX2_LINK_STATUS_PARALLEL_DET
;
595 fw_link_status
|= BNX2_LINK_STATUS_AN_COMPLETE
;
599 fw_link_status
= BNX2_LINK_STATUS_LINK_DOWN
;
601 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_LINK_STATUS
, fw_link_status
);
605 bnx2_report_link(struct bnx2
*bp
)
608 netif_carrier_on(bp
->dev
);
609 printk(KERN_INFO PFX
"%s NIC Link is Up, ", bp
->dev
->name
);
611 printk("%d Mbps ", bp
->line_speed
);
613 if (bp
->duplex
== DUPLEX_FULL
)
614 printk("full duplex");
616 printk("half duplex");
619 if (bp
->flow_ctrl
& FLOW_CTRL_RX
) {
620 printk(", receive ");
621 if (bp
->flow_ctrl
& FLOW_CTRL_TX
)
622 printk("& transmit ");
625 printk(", transmit ");
627 printk("flow control ON");
632 netif_carrier_off(bp
->dev
);
633 printk(KERN_ERR PFX
"%s NIC Link is Down\n", bp
->dev
->name
);
636 bnx2_report_fw_link(bp
);
640 bnx2_resolve_flow_ctrl(struct bnx2
*bp
)
642 u32 local_adv
, remote_adv
;
645 if ((bp
->autoneg
& (AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
)) !=
646 (AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
)) {
648 if (bp
->duplex
== DUPLEX_FULL
) {
649 bp
->flow_ctrl
= bp
->req_flow_ctrl
;
654 if (bp
->duplex
!= DUPLEX_FULL
) {
658 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
659 (CHIP_NUM(bp
) == CHIP_NUM_5708
)) {
662 bnx2_read_phy(bp
, BCM5708S_1000X_STAT1
, &val
);
663 if (val
& BCM5708S_1000X_STAT1_TX_PAUSE
)
664 bp
->flow_ctrl
|= FLOW_CTRL_TX
;
665 if (val
& BCM5708S_1000X_STAT1_RX_PAUSE
)
666 bp
->flow_ctrl
|= FLOW_CTRL_RX
;
670 bnx2_read_phy(bp
, bp
->mii_adv
, &local_adv
);
671 bnx2_read_phy(bp
, bp
->mii_lpa
, &remote_adv
);
673 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
674 u32 new_local_adv
= 0;
675 u32 new_remote_adv
= 0;
677 if (local_adv
& ADVERTISE_1000XPAUSE
)
678 new_local_adv
|= ADVERTISE_PAUSE_CAP
;
679 if (local_adv
& ADVERTISE_1000XPSE_ASYM
)
680 new_local_adv
|= ADVERTISE_PAUSE_ASYM
;
681 if (remote_adv
& ADVERTISE_1000XPAUSE
)
682 new_remote_adv
|= ADVERTISE_PAUSE_CAP
;
683 if (remote_adv
& ADVERTISE_1000XPSE_ASYM
)
684 new_remote_adv
|= ADVERTISE_PAUSE_ASYM
;
686 local_adv
= new_local_adv
;
687 remote_adv
= new_remote_adv
;
690 /* See Table 28B-3 of 802.3ab-1999 spec. */
691 if (local_adv
& ADVERTISE_PAUSE_CAP
) {
692 if(local_adv
& ADVERTISE_PAUSE_ASYM
) {
693 if (remote_adv
& ADVERTISE_PAUSE_CAP
) {
694 bp
->flow_ctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
696 else if (remote_adv
& ADVERTISE_PAUSE_ASYM
) {
697 bp
->flow_ctrl
= FLOW_CTRL_RX
;
701 if (remote_adv
& ADVERTISE_PAUSE_CAP
) {
702 bp
->flow_ctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
706 else if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
707 if ((remote_adv
& ADVERTISE_PAUSE_CAP
) &&
708 (remote_adv
& ADVERTISE_PAUSE_ASYM
)) {
710 bp
->flow_ctrl
= FLOW_CTRL_TX
;
716 bnx2_5709s_linkup(struct bnx2
*bp
)
722 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_GP_STATUS
);
723 bnx2_read_phy(bp
, MII_BNX2_GP_TOP_AN_STATUS1
, &val
);
724 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
726 if ((bp
->autoneg
& AUTONEG_SPEED
) == 0) {
727 bp
->line_speed
= bp
->req_line_speed
;
728 bp
->duplex
= bp
->req_duplex
;
731 speed
= val
& MII_BNX2_GP_TOP_AN_SPEED_MSK
;
733 case MII_BNX2_GP_TOP_AN_SPEED_10
:
734 bp
->line_speed
= SPEED_10
;
736 case MII_BNX2_GP_TOP_AN_SPEED_100
:
737 bp
->line_speed
= SPEED_100
;
739 case MII_BNX2_GP_TOP_AN_SPEED_1G
:
740 case MII_BNX2_GP_TOP_AN_SPEED_1GKV
:
741 bp
->line_speed
= SPEED_1000
;
743 case MII_BNX2_GP_TOP_AN_SPEED_2_5G
:
744 bp
->line_speed
= SPEED_2500
;
747 if (val
& MII_BNX2_GP_TOP_AN_FD
)
748 bp
->duplex
= DUPLEX_FULL
;
750 bp
->duplex
= DUPLEX_HALF
;
755 bnx2_5708s_linkup(struct bnx2
*bp
)
760 bnx2_read_phy(bp
, BCM5708S_1000X_STAT1
, &val
);
761 switch (val
& BCM5708S_1000X_STAT1_SPEED_MASK
) {
762 case BCM5708S_1000X_STAT1_SPEED_10
:
763 bp
->line_speed
= SPEED_10
;
765 case BCM5708S_1000X_STAT1_SPEED_100
:
766 bp
->line_speed
= SPEED_100
;
768 case BCM5708S_1000X_STAT1_SPEED_1G
:
769 bp
->line_speed
= SPEED_1000
;
771 case BCM5708S_1000X_STAT1_SPEED_2G5
:
772 bp
->line_speed
= SPEED_2500
;
775 if (val
& BCM5708S_1000X_STAT1_FD
)
776 bp
->duplex
= DUPLEX_FULL
;
778 bp
->duplex
= DUPLEX_HALF
;
784 bnx2_5706s_linkup(struct bnx2
*bp
)
786 u32 bmcr
, local_adv
, remote_adv
, common
;
789 bp
->line_speed
= SPEED_1000
;
791 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
792 if (bmcr
& BMCR_FULLDPLX
) {
793 bp
->duplex
= DUPLEX_FULL
;
796 bp
->duplex
= DUPLEX_HALF
;
799 if (!(bmcr
& BMCR_ANENABLE
)) {
803 bnx2_read_phy(bp
, bp
->mii_adv
, &local_adv
);
804 bnx2_read_phy(bp
, bp
->mii_lpa
, &remote_adv
);
806 common
= local_adv
& remote_adv
;
807 if (common
& (ADVERTISE_1000XHALF
| ADVERTISE_1000XFULL
)) {
809 if (common
& ADVERTISE_1000XFULL
) {
810 bp
->duplex
= DUPLEX_FULL
;
813 bp
->duplex
= DUPLEX_HALF
;
821 bnx2_copper_linkup(struct bnx2
*bp
)
825 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
826 if (bmcr
& BMCR_ANENABLE
) {
827 u32 local_adv
, remote_adv
, common
;
829 bnx2_read_phy(bp
, MII_CTRL1000
, &local_adv
);
830 bnx2_read_phy(bp
, MII_STAT1000
, &remote_adv
);
832 common
= local_adv
& (remote_adv
>> 2);
833 if (common
& ADVERTISE_1000FULL
) {
834 bp
->line_speed
= SPEED_1000
;
835 bp
->duplex
= DUPLEX_FULL
;
837 else if (common
& ADVERTISE_1000HALF
) {
838 bp
->line_speed
= SPEED_1000
;
839 bp
->duplex
= DUPLEX_HALF
;
842 bnx2_read_phy(bp
, bp
->mii_adv
, &local_adv
);
843 bnx2_read_phy(bp
, bp
->mii_lpa
, &remote_adv
);
845 common
= local_adv
& remote_adv
;
846 if (common
& ADVERTISE_100FULL
) {
847 bp
->line_speed
= SPEED_100
;
848 bp
->duplex
= DUPLEX_FULL
;
850 else if (common
& ADVERTISE_100HALF
) {
851 bp
->line_speed
= SPEED_100
;
852 bp
->duplex
= DUPLEX_HALF
;
854 else if (common
& ADVERTISE_10FULL
) {
855 bp
->line_speed
= SPEED_10
;
856 bp
->duplex
= DUPLEX_FULL
;
858 else if (common
& ADVERTISE_10HALF
) {
859 bp
->line_speed
= SPEED_10
;
860 bp
->duplex
= DUPLEX_HALF
;
869 if (bmcr
& BMCR_SPEED100
) {
870 bp
->line_speed
= SPEED_100
;
873 bp
->line_speed
= SPEED_10
;
875 if (bmcr
& BMCR_FULLDPLX
) {
876 bp
->duplex
= DUPLEX_FULL
;
879 bp
->duplex
= DUPLEX_HALF
;
887 bnx2_set_mac_link(struct bnx2
*bp
)
891 REG_WR(bp
, BNX2_EMAC_TX_LENGTHS
, 0x2620);
892 if (bp
->link_up
&& (bp
->line_speed
== SPEED_1000
) &&
893 (bp
->duplex
== DUPLEX_HALF
)) {
894 REG_WR(bp
, BNX2_EMAC_TX_LENGTHS
, 0x26ff);
897 /* Configure the EMAC mode register. */
898 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
900 val
&= ~(BNX2_EMAC_MODE_PORT
| BNX2_EMAC_MODE_HALF_DUPLEX
|
901 BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
|
902 BNX2_EMAC_MODE_25G_MODE
);
905 switch (bp
->line_speed
) {
907 if (CHIP_NUM(bp
) != CHIP_NUM_5706
) {
908 val
|= BNX2_EMAC_MODE_PORT_MII_10M
;
913 val
|= BNX2_EMAC_MODE_PORT_MII
;
916 val
|= BNX2_EMAC_MODE_25G_MODE
;
919 val
|= BNX2_EMAC_MODE_PORT_GMII
;
924 val
|= BNX2_EMAC_MODE_PORT_GMII
;
927 /* Set the MAC to operate in the appropriate duplex mode. */
928 if (bp
->duplex
== DUPLEX_HALF
)
929 val
|= BNX2_EMAC_MODE_HALF_DUPLEX
;
930 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
932 /* Enable/disable rx PAUSE. */
933 bp
->rx_mode
&= ~BNX2_EMAC_RX_MODE_FLOW_EN
;
935 if (bp
->flow_ctrl
& FLOW_CTRL_RX
)
936 bp
->rx_mode
|= BNX2_EMAC_RX_MODE_FLOW_EN
;
937 REG_WR(bp
, BNX2_EMAC_RX_MODE
, bp
->rx_mode
);
939 /* Enable/disable tx PAUSE. */
940 val
= REG_RD(bp
, BNX2_EMAC_TX_MODE
);
941 val
&= ~BNX2_EMAC_TX_MODE_FLOW_EN
;
943 if (bp
->flow_ctrl
& FLOW_CTRL_TX
)
944 val
|= BNX2_EMAC_TX_MODE_FLOW_EN
;
945 REG_WR(bp
, BNX2_EMAC_TX_MODE
, val
);
947 /* Acknowledge the interrupt. */
948 REG_WR(bp
, BNX2_EMAC_STATUS
, BNX2_EMAC_STATUS_LINK_CHANGE
);
954 bnx2_enable_bmsr1(struct bnx2
*bp
)
956 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
957 (CHIP_NUM(bp
) == CHIP_NUM_5709
))
958 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
959 MII_BNX2_BLK_ADDR_GP_STATUS
);
963 bnx2_disable_bmsr1(struct bnx2
*bp
)
965 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
966 (CHIP_NUM(bp
) == CHIP_NUM_5709
))
967 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
968 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
972 bnx2_test_and_enable_2g5(struct bnx2
*bp
)
977 if (!(bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
))
980 if (bp
->autoneg
& AUTONEG_SPEED
)
981 bp
->advertising
|= ADVERTISED_2500baseX_Full
;
983 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
984 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_OVER1G
);
986 bnx2_read_phy(bp
, bp
->mii_up1
, &up1
);
987 if (!(up1
& BCM5708S_UP1_2G5
)) {
988 up1
|= BCM5708S_UP1_2G5
;
989 bnx2_write_phy(bp
, bp
->mii_up1
, up1
);
993 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
994 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
995 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1001 bnx2_test_and_disable_2g5(struct bnx2
*bp
)
1006 if (!(bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
))
1009 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1010 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_OVER1G
);
1012 bnx2_read_phy(bp
, bp
->mii_up1
, &up1
);
1013 if (up1
& BCM5708S_UP1_2G5
) {
1014 up1
&= ~BCM5708S_UP1_2G5
;
1015 bnx2_write_phy(bp
, bp
->mii_up1
, up1
);
1019 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1020 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1027 bnx2_enable_forced_2g5(struct bnx2
*bp
)
1031 if (!(bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
))
1034 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
1037 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1038 MII_BNX2_BLK_ADDR_SERDES_DIG
);
1039 bnx2_read_phy(bp
, MII_BNX2_SERDES_DIG_MISC1
, &val
);
1040 val
&= ~MII_BNX2_SD_MISC1_FORCE_MSK
;
1041 val
|= MII_BNX2_SD_MISC1_FORCE
| MII_BNX2_SD_MISC1_FORCE_2_5G
;
1042 bnx2_write_phy(bp
, MII_BNX2_SERDES_DIG_MISC1
, val
);
1044 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1046 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1048 } else if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
1049 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1050 bmcr
|= BCM5708S_BMCR_FORCE_2500
;
1053 if (bp
->autoneg
& AUTONEG_SPEED
) {
1054 bmcr
&= ~BMCR_ANENABLE
;
1055 if (bp
->req_duplex
== DUPLEX_FULL
)
1056 bmcr
|= BMCR_FULLDPLX
;
1058 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
1062 bnx2_disable_forced_2g5(struct bnx2
*bp
)
1066 if (!(bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
))
1069 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
1072 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1073 MII_BNX2_BLK_ADDR_SERDES_DIG
);
1074 bnx2_read_phy(bp
, MII_BNX2_SERDES_DIG_MISC1
, &val
);
1075 val
&= ~MII_BNX2_SD_MISC1_FORCE
;
1076 bnx2_write_phy(bp
, MII_BNX2_SERDES_DIG_MISC1
, val
);
1078 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
,
1079 MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1080 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1082 } else if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
1083 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1084 bmcr
&= ~BCM5708S_BMCR_FORCE_2500
;
1087 if (bp
->autoneg
& AUTONEG_SPEED
)
1088 bmcr
|= BMCR_SPEED1000
| BMCR_ANENABLE
| BMCR_ANRESTART
;
1089 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
1093 bnx2_set_link(struct bnx2
*bp
)
1098 if (bp
->loopback
== MAC_LOOPBACK
|| bp
->loopback
== PHY_LOOPBACK
) {
1103 link_up
= bp
->link_up
;
1105 bnx2_enable_bmsr1(bp
);
1106 bnx2_read_phy(bp
, bp
->mii_bmsr1
, &bmsr
);
1107 bnx2_read_phy(bp
, bp
->mii_bmsr1
, &bmsr
);
1108 bnx2_disable_bmsr1(bp
);
1110 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
1111 (CHIP_NUM(bp
) == CHIP_NUM_5706
)) {
1114 val
= REG_RD(bp
, BNX2_EMAC_STATUS
);
1115 if (val
& BNX2_EMAC_STATUS_LINK
)
1116 bmsr
|= BMSR_LSTATUS
;
1118 bmsr
&= ~BMSR_LSTATUS
;
1121 if (bmsr
& BMSR_LSTATUS
) {
1124 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1125 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
1126 bnx2_5706s_linkup(bp
);
1127 else if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
1128 bnx2_5708s_linkup(bp
);
1129 else if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1130 bnx2_5709s_linkup(bp
);
1133 bnx2_copper_linkup(bp
);
1135 bnx2_resolve_flow_ctrl(bp
);
1138 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
1139 (bp
->autoneg
& AUTONEG_SPEED
))
1140 bnx2_disable_forced_2g5(bp
);
1142 bp
->phy_flags
&= ~PHY_PARALLEL_DETECT_FLAG
;
1146 if (bp
->link_up
!= link_up
) {
1147 bnx2_report_link(bp
);
1150 bnx2_set_mac_link(bp
);
1156 bnx2_reset_phy(struct bnx2
*bp
)
1161 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_RESET
);
1163 #define PHY_RESET_MAX_WAIT 100
1164 for (i
= 0; i
< PHY_RESET_MAX_WAIT
; i
++) {
1167 bnx2_read_phy(bp
, bp
->mii_bmcr
, ®
);
1168 if (!(reg
& BMCR_RESET
)) {
1173 if (i
== PHY_RESET_MAX_WAIT
) {
1180 bnx2_phy_get_pause_adv(struct bnx2
*bp
)
1184 if ((bp
->req_flow_ctrl
& (FLOW_CTRL_RX
| FLOW_CTRL_TX
)) ==
1185 (FLOW_CTRL_RX
| FLOW_CTRL_TX
)) {
1187 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1188 adv
= ADVERTISE_1000XPAUSE
;
1191 adv
= ADVERTISE_PAUSE_CAP
;
1194 else if (bp
->req_flow_ctrl
& FLOW_CTRL_TX
) {
1195 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1196 adv
= ADVERTISE_1000XPSE_ASYM
;
1199 adv
= ADVERTISE_PAUSE_ASYM
;
1202 else if (bp
->req_flow_ctrl
& FLOW_CTRL_RX
) {
1203 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1204 adv
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
1207 adv
= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
1214 bnx2_setup_serdes_phy(struct bnx2
*bp
)
1219 if (!(bp
->autoneg
& AUTONEG_SPEED
)) {
1221 int force_link_down
= 0;
1223 if (bp
->req_line_speed
== SPEED_2500
) {
1224 if (!bnx2_test_and_enable_2g5(bp
))
1225 force_link_down
= 1;
1226 } else if (bp
->req_line_speed
== SPEED_1000
) {
1227 if (bnx2_test_and_disable_2g5(bp
))
1228 force_link_down
= 1;
1230 bnx2_read_phy(bp
, bp
->mii_adv
, &adv
);
1231 adv
&= ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
);
1233 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1234 new_bmcr
= bmcr
& ~BMCR_ANENABLE
;
1235 new_bmcr
|= BMCR_SPEED1000
;
1237 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
1238 if (bp
->req_line_speed
== SPEED_2500
)
1239 bnx2_enable_forced_2g5(bp
);
1240 else if (bp
->req_line_speed
== SPEED_1000
) {
1241 bnx2_disable_forced_2g5(bp
);
1242 new_bmcr
&= ~0x2000;
1245 } else if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
1246 if (bp
->req_line_speed
== SPEED_2500
)
1247 new_bmcr
|= BCM5708S_BMCR_FORCE_2500
;
1249 new_bmcr
= bmcr
& ~BCM5708S_BMCR_FORCE_2500
;
1252 if (bp
->req_duplex
== DUPLEX_FULL
) {
1253 adv
|= ADVERTISE_1000XFULL
;
1254 new_bmcr
|= BMCR_FULLDPLX
;
1257 adv
|= ADVERTISE_1000XHALF
;
1258 new_bmcr
&= ~BMCR_FULLDPLX
;
1260 if ((new_bmcr
!= bmcr
) || (force_link_down
)) {
1261 /* Force a link down visible on the other side */
1263 bnx2_write_phy(bp
, bp
->mii_adv
, adv
&
1264 ~(ADVERTISE_1000XFULL
|
1265 ADVERTISE_1000XHALF
));
1266 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
|
1267 BMCR_ANRESTART
| BMCR_ANENABLE
);
1270 netif_carrier_off(bp
->dev
);
1271 bnx2_write_phy(bp
, bp
->mii_bmcr
, new_bmcr
);
1272 bnx2_report_link(bp
);
1274 bnx2_write_phy(bp
, bp
->mii_adv
, adv
);
1275 bnx2_write_phy(bp
, bp
->mii_bmcr
, new_bmcr
);
1277 bnx2_resolve_flow_ctrl(bp
);
1278 bnx2_set_mac_link(bp
);
1283 bnx2_test_and_enable_2g5(bp
);
1285 if (bp
->advertising
& ADVERTISED_1000baseT_Full
)
1286 new_adv
|= ADVERTISE_1000XFULL
;
1288 new_adv
|= bnx2_phy_get_pause_adv(bp
);
1290 bnx2_read_phy(bp
, bp
->mii_adv
, &adv
);
1291 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1293 bp
->serdes_an_pending
= 0;
1294 if ((adv
!= new_adv
) || ((bmcr
& BMCR_ANENABLE
) == 0)) {
1295 /* Force a link down visible on the other side */
1297 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_LOOPBACK
);
1298 spin_unlock_bh(&bp
->phy_lock
);
1300 spin_lock_bh(&bp
->phy_lock
);
1303 bnx2_write_phy(bp
, bp
->mii_adv
, new_adv
);
1304 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
| BMCR_ANRESTART
|
1306 /* Speed up link-up time when the link partner
1307 * does not autonegotiate which is very common
1308 * in blade servers. Some blade servers use
1309 * IPMI for kerboard input and it's important
1310 * to minimize link disruptions. Autoneg. involves
1311 * exchanging base pages plus 3 next pages and
1312 * normally completes in about 120 msec.
1314 bp
->current_interval
= SERDES_AN_TIMEOUT
;
1315 bp
->serdes_an_pending
= 1;
1316 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
1318 bnx2_resolve_flow_ctrl(bp
);
1319 bnx2_set_mac_link(bp
);
1325 #define ETHTOOL_ALL_FIBRE_SPEED \
1326 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1327 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1328 (ADVERTISED_1000baseT_Full)
1330 #define ETHTOOL_ALL_COPPER_SPEED \
1331 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1332 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1333 ADVERTISED_1000baseT_Full)
1335 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1336 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1338 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1341 bnx2_set_default_link(struct bnx2
*bp
)
1343 bp
->autoneg
= AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
;
1344 bp
->req_line_speed
= 0;
1345 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1348 bp
->advertising
= ETHTOOL_ALL_FIBRE_SPEED
| ADVERTISED_Autoneg
;
1350 reg
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_HW_CFG_CONFIG
);
1351 reg
&= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK
;
1352 if (reg
== BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G
) {
1354 bp
->req_line_speed
= bp
->line_speed
= SPEED_1000
;
1355 bp
->req_duplex
= DUPLEX_FULL
;
1358 bp
->advertising
= ETHTOOL_ALL_COPPER_SPEED
| ADVERTISED_Autoneg
;
1362 bnx2_setup_copper_phy(struct bnx2
*bp
)
1367 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
1369 if (bp
->autoneg
& AUTONEG_SPEED
) {
1370 u32 adv_reg
, adv1000_reg
;
1371 u32 new_adv_reg
= 0;
1372 u32 new_adv1000_reg
= 0;
1374 bnx2_read_phy(bp
, bp
->mii_adv
, &adv_reg
);
1375 adv_reg
&= (PHY_ALL_10_100_SPEED
| ADVERTISE_PAUSE_CAP
|
1376 ADVERTISE_PAUSE_ASYM
);
1378 bnx2_read_phy(bp
, MII_CTRL1000
, &adv1000_reg
);
1379 adv1000_reg
&= PHY_ALL_1000_SPEED
;
1381 if (bp
->advertising
& ADVERTISED_10baseT_Half
)
1382 new_adv_reg
|= ADVERTISE_10HALF
;
1383 if (bp
->advertising
& ADVERTISED_10baseT_Full
)
1384 new_adv_reg
|= ADVERTISE_10FULL
;
1385 if (bp
->advertising
& ADVERTISED_100baseT_Half
)
1386 new_adv_reg
|= ADVERTISE_100HALF
;
1387 if (bp
->advertising
& ADVERTISED_100baseT_Full
)
1388 new_adv_reg
|= ADVERTISE_100FULL
;
1389 if (bp
->advertising
& ADVERTISED_1000baseT_Full
)
1390 new_adv1000_reg
|= ADVERTISE_1000FULL
;
1392 new_adv_reg
|= ADVERTISE_CSMA
;
1394 new_adv_reg
|= bnx2_phy_get_pause_adv(bp
);
1396 if ((adv1000_reg
!= new_adv1000_reg
) ||
1397 (adv_reg
!= new_adv_reg
) ||
1398 ((bmcr
& BMCR_ANENABLE
) == 0)) {
1400 bnx2_write_phy(bp
, bp
->mii_adv
, new_adv_reg
);
1401 bnx2_write_phy(bp
, MII_CTRL1000
, new_adv1000_reg
);
1402 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_ANRESTART
|
1405 else if (bp
->link_up
) {
1406 /* Flow ctrl may have changed from auto to forced */
1407 /* or vice-versa. */
1409 bnx2_resolve_flow_ctrl(bp
);
1410 bnx2_set_mac_link(bp
);
1416 if (bp
->req_line_speed
== SPEED_100
) {
1417 new_bmcr
|= BMCR_SPEED100
;
1419 if (bp
->req_duplex
== DUPLEX_FULL
) {
1420 new_bmcr
|= BMCR_FULLDPLX
;
1422 if (new_bmcr
!= bmcr
) {
1425 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
1426 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
1428 if (bmsr
& BMSR_LSTATUS
) {
1429 /* Force link down */
1430 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_LOOPBACK
);
1431 spin_unlock_bh(&bp
->phy_lock
);
1433 spin_lock_bh(&bp
->phy_lock
);
1435 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
1436 bnx2_read_phy(bp
, bp
->mii_bmsr
, &bmsr
);
1439 bnx2_write_phy(bp
, bp
->mii_bmcr
, new_bmcr
);
1441 /* Normally, the new speed is setup after the link has
1442 * gone down and up again. In some cases, link will not go
1443 * down so we need to set up the new speed here.
1445 if (bmsr
& BMSR_LSTATUS
) {
1446 bp
->line_speed
= bp
->req_line_speed
;
1447 bp
->duplex
= bp
->req_duplex
;
1448 bnx2_resolve_flow_ctrl(bp
);
1449 bnx2_set_mac_link(bp
);
1452 bnx2_resolve_flow_ctrl(bp
);
1453 bnx2_set_mac_link(bp
);
1459 bnx2_setup_phy(struct bnx2
*bp
)
1461 if (bp
->loopback
== MAC_LOOPBACK
)
1464 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1465 return (bnx2_setup_serdes_phy(bp
));
1468 return (bnx2_setup_copper_phy(bp
));
1473 bnx2_init_5709s_phy(struct bnx2
*bp
)
1477 bp
->mii_bmcr
= MII_BMCR
+ 0x10;
1478 bp
->mii_bmsr
= MII_BMSR
+ 0x10;
1479 bp
->mii_bmsr1
= MII_BNX2_GP_TOP_AN_STATUS1
;
1480 bp
->mii_adv
= MII_ADVERTISE
+ 0x10;
1481 bp
->mii_lpa
= MII_LPA
+ 0x10;
1482 bp
->mii_up1
= MII_BNX2_OVER1G_UP1
;
1484 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_AER
);
1485 bnx2_write_phy(bp
, MII_BNX2_AER_AER
, MII_BNX2_AER_AER_AN_MMD
);
1487 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1490 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_SERDES_DIG
);
1492 bnx2_read_phy(bp
, MII_BNX2_SERDES_DIG_1000XCTL1
, &val
);
1493 val
&= ~MII_BNX2_SD_1000XCTL1_AUTODET
;
1494 val
|= MII_BNX2_SD_1000XCTL1_FIBER
;
1495 bnx2_write_phy(bp
, MII_BNX2_SERDES_DIG_1000XCTL1
, val
);
1497 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_OVER1G
);
1498 bnx2_read_phy(bp
, MII_BNX2_OVER1G_UP1
, &val
);
1499 if (bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
)
1500 val
|= BCM5708S_UP1_2G5
;
1502 val
&= ~BCM5708S_UP1_2G5
;
1503 bnx2_write_phy(bp
, MII_BNX2_OVER1G_UP1
, val
);
1505 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_BAM_NXTPG
);
1506 bnx2_read_phy(bp
, MII_BNX2_BAM_NXTPG_CTL
, &val
);
1507 val
|= MII_BNX2_NXTPG_CTL_T2
| MII_BNX2_NXTPG_CTL_BAM
;
1508 bnx2_write_phy(bp
, MII_BNX2_BAM_NXTPG_CTL
, val
);
1510 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_CL73_USERB0
);
1512 val
= MII_BNX2_CL73_BAM_EN
| MII_BNX2_CL73_BAM_STA_MGR_EN
|
1513 MII_BNX2_CL73_BAM_NP_AFT_BP_EN
;
1514 bnx2_write_phy(bp
, MII_BNX2_CL73_BAM_CTL1
, val
);
1516 bnx2_write_phy(bp
, MII_BNX2_BLK_ADDR
, MII_BNX2_BLK_ADDR_COMBO_IEEEB0
);
1522 bnx2_init_5708s_phy(struct bnx2
*bp
)
1528 bp
->mii_up1
= BCM5708S_UP1
;
1530 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG3
);
1531 bnx2_write_phy(bp
, BCM5708S_DIG_3_0
, BCM5708S_DIG_3_0_USE_IEEE
);
1532 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG
);
1534 bnx2_read_phy(bp
, BCM5708S_1000X_CTL1
, &val
);
1535 val
|= BCM5708S_1000X_CTL1_FIBER_MODE
| BCM5708S_1000X_CTL1_AUTODET_EN
;
1536 bnx2_write_phy(bp
, BCM5708S_1000X_CTL1
, val
);
1538 bnx2_read_phy(bp
, BCM5708S_1000X_CTL2
, &val
);
1539 val
|= BCM5708S_1000X_CTL2_PLLEL_DET_EN
;
1540 bnx2_write_phy(bp
, BCM5708S_1000X_CTL2
, val
);
1542 if (bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
) {
1543 bnx2_read_phy(bp
, BCM5708S_UP1
, &val
);
1544 val
|= BCM5708S_UP1_2G5
;
1545 bnx2_write_phy(bp
, BCM5708S_UP1
, val
);
1548 if ((CHIP_ID(bp
) == CHIP_ID_5708_A0
) ||
1549 (CHIP_ID(bp
) == CHIP_ID_5708_B0
) ||
1550 (CHIP_ID(bp
) == CHIP_ID_5708_B1
)) {
1551 /* increase tx signal amplitude */
1552 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
1553 BCM5708S_BLK_ADDR_TX_MISC
);
1554 bnx2_read_phy(bp
, BCM5708S_TX_ACTL1
, &val
);
1555 val
&= ~BCM5708S_TX_ACTL1_DRIVER_VCM
;
1556 bnx2_write_phy(bp
, BCM5708S_TX_ACTL1
, val
);
1557 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG
);
1560 val
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_HW_CFG_CONFIG
) &
1561 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK
;
1566 is_backplane
= REG_RD_IND(bp
, bp
->shmem_base
+
1567 BNX2_SHARED_HW_CFG_CONFIG
);
1568 if (is_backplane
& BNX2_SHARED_HW_CFG_PHY_BACKPLANE
) {
1569 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
1570 BCM5708S_BLK_ADDR_TX_MISC
);
1571 bnx2_write_phy(bp
, BCM5708S_TX_ACTL3
, val
);
1572 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
1573 BCM5708S_BLK_ADDR_DIG
);
1580 bnx2_init_5706s_phy(struct bnx2
*bp
)
1584 bp
->phy_flags
&= ~PHY_PARALLEL_DETECT_FLAG
;
1586 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
1587 REG_WR(bp
, BNX2_MISC_GP_HW_CTL0
, 0x300);
1589 if (bp
->dev
->mtu
> 1500) {
1592 /* Set extended packet length bit */
1593 bnx2_write_phy(bp
, 0x18, 0x7);
1594 bnx2_read_phy(bp
, 0x18, &val
);
1595 bnx2_write_phy(bp
, 0x18, (val
& 0xfff8) | 0x4000);
1597 bnx2_write_phy(bp
, 0x1c, 0x6c00);
1598 bnx2_read_phy(bp
, 0x1c, &val
);
1599 bnx2_write_phy(bp
, 0x1c, (val
& 0x3ff) | 0xec02);
1604 bnx2_write_phy(bp
, 0x18, 0x7);
1605 bnx2_read_phy(bp
, 0x18, &val
);
1606 bnx2_write_phy(bp
, 0x18, val
& ~0x4007);
1608 bnx2_write_phy(bp
, 0x1c, 0x6c00);
1609 bnx2_read_phy(bp
, 0x1c, &val
);
1610 bnx2_write_phy(bp
, 0x1c, (val
& 0x3fd) | 0xec00);
1617 bnx2_init_copper_phy(struct bnx2
*bp
)
1623 if (bp
->phy_flags
& PHY_CRC_FIX_FLAG
) {
1624 bnx2_write_phy(bp
, 0x18, 0x0c00);
1625 bnx2_write_phy(bp
, 0x17, 0x000a);
1626 bnx2_write_phy(bp
, 0x15, 0x310b);
1627 bnx2_write_phy(bp
, 0x17, 0x201f);
1628 bnx2_write_phy(bp
, 0x15, 0x9506);
1629 bnx2_write_phy(bp
, 0x17, 0x401f);
1630 bnx2_write_phy(bp
, 0x15, 0x14e2);
1631 bnx2_write_phy(bp
, 0x18, 0x0400);
1634 if (bp
->phy_flags
& PHY_DIS_EARLY_DAC_FLAG
) {
1635 bnx2_write_phy(bp
, MII_BNX2_DSP_ADDRESS
,
1636 MII_BNX2_DSP_EXPAND_REG
| 0x8);
1637 bnx2_read_phy(bp
, MII_BNX2_DSP_RW_PORT
, &val
);
1639 bnx2_write_phy(bp
, MII_BNX2_DSP_RW_PORT
, val
);
1642 if (bp
->dev
->mtu
> 1500) {
1643 /* Set extended packet length bit */
1644 bnx2_write_phy(bp
, 0x18, 0x7);
1645 bnx2_read_phy(bp
, 0x18, &val
);
1646 bnx2_write_phy(bp
, 0x18, val
| 0x4000);
1648 bnx2_read_phy(bp
, 0x10, &val
);
1649 bnx2_write_phy(bp
, 0x10, val
| 0x1);
1652 bnx2_write_phy(bp
, 0x18, 0x7);
1653 bnx2_read_phy(bp
, 0x18, &val
);
1654 bnx2_write_phy(bp
, 0x18, val
& ~0x4007);
1656 bnx2_read_phy(bp
, 0x10, &val
);
1657 bnx2_write_phy(bp
, 0x10, val
& ~0x1);
1660 /* ethernet@wirespeed */
1661 bnx2_write_phy(bp
, 0x18, 0x7007);
1662 bnx2_read_phy(bp
, 0x18, &val
);
1663 bnx2_write_phy(bp
, 0x18, val
| (1 << 15) | (1 << 4));
1669 bnx2_init_phy(struct bnx2
*bp
)
1674 bp
->phy_flags
&= ~PHY_INT_MODE_MASK_FLAG
;
1675 bp
->phy_flags
|= PHY_INT_MODE_LINK_READY_FLAG
;
1677 bp
->mii_bmcr
= MII_BMCR
;
1678 bp
->mii_bmsr
= MII_BMSR
;
1679 bp
->mii_bmsr1
= MII_BMSR
;
1680 bp
->mii_adv
= MII_ADVERTISE
;
1681 bp
->mii_lpa
= MII_LPA
;
1683 REG_WR(bp
, BNX2_EMAC_ATTENTION_ENA
, BNX2_EMAC_ATTENTION_ENA_LINK
);
1685 bnx2_read_phy(bp
, MII_PHYSID1
, &val
);
1686 bp
->phy_id
= val
<< 16;
1687 bnx2_read_phy(bp
, MII_PHYSID2
, &val
);
1688 bp
->phy_id
|= val
& 0xffff;
1690 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1691 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
1692 rc
= bnx2_init_5706s_phy(bp
);
1693 else if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
1694 rc
= bnx2_init_5708s_phy(bp
);
1695 else if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
1696 rc
= bnx2_init_5709s_phy(bp
);
1699 rc
= bnx2_init_copper_phy(bp
);
1708 bnx2_set_mac_loopback(struct bnx2
*bp
)
1712 mac_mode
= REG_RD(bp
, BNX2_EMAC_MODE
);
1713 mac_mode
&= ~BNX2_EMAC_MODE_PORT
;
1714 mac_mode
|= BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
;
1715 REG_WR(bp
, BNX2_EMAC_MODE
, mac_mode
);
1720 static int bnx2_test_link(struct bnx2
*);
1723 bnx2_set_phy_loopback(struct bnx2
*bp
)
1728 spin_lock_bh(&bp
->phy_lock
);
1729 rc
= bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_LOOPBACK
| BMCR_FULLDPLX
|
1731 spin_unlock_bh(&bp
->phy_lock
);
1735 for (i
= 0; i
< 10; i
++) {
1736 if (bnx2_test_link(bp
) == 0)
1741 mac_mode
= REG_RD(bp
, BNX2_EMAC_MODE
);
1742 mac_mode
&= ~(BNX2_EMAC_MODE_PORT
| BNX2_EMAC_MODE_HALF_DUPLEX
|
1743 BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
|
1744 BNX2_EMAC_MODE_25G_MODE
);
1746 mac_mode
|= BNX2_EMAC_MODE_PORT_GMII
;
1747 REG_WR(bp
, BNX2_EMAC_MODE
, mac_mode
);
1753 bnx2_fw_sync(struct bnx2
*bp
, u32 msg_data
, int silent
)
1759 msg_data
|= bp
->fw_wr_seq
;
1761 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_DRV_MB
, msg_data
);
1763 /* wait for an acknowledgement. */
1764 for (i
= 0; i
< (FW_ACK_TIME_OUT_MS
/ 10); i
++) {
1767 val
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_FW_MB
);
1769 if ((val
& BNX2_FW_MSG_ACK
) == (msg_data
& BNX2_DRV_MSG_SEQ
))
1772 if ((msg_data
& BNX2_DRV_MSG_DATA
) == BNX2_DRV_MSG_DATA_WAIT0
)
1775 /* If we timed out, inform the firmware that this is the case. */
1776 if ((val
& BNX2_FW_MSG_ACK
) != (msg_data
& BNX2_DRV_MSG_SEQ
)) {
1778 printk(KERN_ERR PFX
"fw sync timeout, reset code = "
1781 msg_data
&= ~BNX2_DRV_MSG_CODE
;
1782 msg_data
|= BNX2_DRV_MSG_CODE_FW_TIMEOUT
;
1784 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_DRV_MB
, msg_data
);
1789 if ((val
& BNX2_FW_MSG_STATUS_MASK
) != BNX2_FW_MSG_STATUS_OK
)
1796 bnx2_init_5709_context(struct bnx2
*bp
)
1801 val
= BNX2_CTX_COMMAND_ENABLED
| BNX2_CTX_COMMAND_MEM_INIT
| (1 << 12);
1802 val
|= (BCM_PAGE_BITS
- 8) << 16;
1803 REG_WR(bp
, BNX2_CTX_COMMAND
, val
);
1804 for (i
= 0; i
< 10; i
++) {
1805 val
= REG_RD(bp
, BNX2_CTX_COMMAND
);
1806 if (!(val
& BNX2_CTX_COMMAND_MEM_INIT
))
1810 if (val
& BNX2_CTX_COMMAND_MEM_INIT
)
1813 for (i
= 0; i
< bp
->ctx_pages
; i
++) {
1816 REG_WR(bp
, BNX2_CTX_HOST_PAGE_TBL_DATA0
,
1817 (bp
->ctx_blk_mapping
[i
] & 0xffffffff) |
1818 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID
);
1819 REG_WR(bp
, BNX2_CTX_HOST_PAGE_TBL_DATA1
,
1820 (u64
) bp
->ctx_blk_mapping
[i
] >> 32);
1821 REG_WR(bp
, BNX2_CTX_HOST_PAGE_TBL_CTRL
, i
|
1822 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
);
1823 for (j
= 0; j
< 10; j
++) {
1825 val
= REG_RD(bp
, BNX2_CTX_HOST_PAGE_TBL_CTRL
);
1826 if (!(val
& BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
))
1830 if (val
& BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ
) {
1839 bnx2_init_context(struct bnx2
*bp
)
1845 u32 vcid_addr
, pcid_addr
, offset
;
1850 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
1853 vcid_addr
= GET_PCID_ADDR(vcid
);
1855 new_vcid
= 0x60 + (vcid
& 0xf0) + (vcid
& 0x7);
1860 pcid_addr
= GET_PCID_ADDR(new_vcid
);
1863 vcid_addr
= GET_CID_ADDR(vcid
);
1864 pcid_addr
= vcid_addr
;
1867 for (i
= 0; i
< (CTX_SIZE
/ PHY_CTX_SIZE
); i
++) {
1868 vcid_addr
+= (i
<< PHY_CTX_SHIFT
);
1869 pcid_addr
+= (i
<< PHY_CTX_SHIFT
);
1871 REG_WR(bp
, BNX2_CTX_VIRT_ADDR
, 0x00);
1872 REG_WR(bp
, BNX2_CTX_PAGE_TBL
, pcid_addr
);
1874 /* Zero out the context. */
1875 for (offset
= 0; offset
< PHY_CTX_SIZE
; offset
+= 4)
1876 CTX_WR(bp
, 0x00, offset
, 0);
1878 REG_WR(bp
, BNX2_CTX_VIRT_ADDR
, vcid_addr
);
1879 REG_WR(bp
, BNX2_CTX_PAGE_TBL
, pcid_addr
);
1885 bnx2_alloc_bad_rbuf(struct bnx2
*bp
)
1891 good_mbuf
= kmalloc(512 * sizeof(u16
), GFP_KERNEL
);
1892 if (good_mbuf
== NULL
) {
1893 printk(KERN_ERR PFX
"Failed to allocate memory in "
1894 "bnx2_alloc_bad_rbuf\n");
1898 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
1899 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE
);
1903 /* Allocate a bunch of mbufs and save the good ones in an array. */
1904 val
= REG_RD_IND(bp
, BNX2_RBUF_STATUS1
);
1905 while (val
& BNX2_RBUF_STATUS1_FREE_COUNT
) {
1906 REG_WR_IND(bp
, BNX2_RBUF_COMMAND
, BNX2_RBUF_COMMAND_ALLOC_REQ
);
1908 val
= REG_RD_IND(bp
, BNX2_RBUF_FW_BUF_ALLOC
);
1910 val
&= BNX2_RBUF_FW_BUF_ALLOC_VALUE
;
1912 /* The addresses with Bit 9 set are bad memory blocks. */
1913 if (!(val
& (1 << 9))) {
1914 good_mbuf
[good_mbuf_cnt
] = (u16
) val
;
1918 val
= REG_RD_IND(bp
, BNX2_RBUF_STATUS1
);
1921 /* Free the good ones back to the mbuf pool thus discarding
1922 * all the bad ones. */
1923 while (good_mbuf_cnt
) {
1926 val
= good_mbuf
[good_mbuf_cnt
];
1927 val
= (val
<< 9) | val
| 1;
1929 REG_WR_IND(bp
, BNX2_RBUF_FW_BUF_FREE
, val
);
1936 bnx2_set_mac_addr(struct bnx2
*bp
)
1939 u8
*mac_addr
= bp
->dev
->dev_addr
;
1941 val
= (mac_addr
[0] << 8) | mac_addr
[1];
1943 REG_WR(bp
, BNX2_EMAC_MAC_MATCH0
, val
);
1945 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
1946 (mac_addr
[4] << 8) | mac_addr
[5];
1948 REG_WR(bp
, BNX2_EMAC_MAC_MATCH1
, val
);
1952 bnx2_alloc_rx_skb(struct bnx2
*bp
, u16 index
)
1954 struct sk_buff
*skb
;
1955 struct sw_bd
*rx_buf
= &bp
->rx_buf_ring
[index
];
1957 struct rx_bd
*rxbd
= &bp
->rx_desc_ring
[RX_RING(index
)][RX_IDX(index
)];
1958 unsigned long align
;
1960 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
1965 if (unlikely((align
= (unsigned long) skb
->data
& (BNX2_RX_ALIGN
- 1))))
1966 skb_reserve(skb
, BNX2_RX_ALIGN
- align
);
1968 mapping
= pci_map_single(bp
->pdev
, skb
->data
, bp
->rx_buf_use_size
,
1969 PCI_DMA_FROMDEVICE
);
1972 pci_unmap_addr_set(rx_buf
, mapping
, mapping
);
1974 rxbd
->rx_bd_haddr_hi
= (u64
) mapping
>> 32;
1975 rxbd
->rx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
1977 bp
->rx_prod_bseq
+= bp
->rx_buf_use_size
;
1983 bnx2_phy_event_is_set(struct bnx2
*bp
, u32 event
)
1985 struct status_block
*sblk
= bp
->status_blk
;
1986 u32 new_link_state
, old_link_state
;
1989 new_link_state
= sblk
->status_attn_bits
& event
;
1990 old_link_state
= sblk
->status_attn_bits_ack
& event
;
1991 if (new_link_state
!= old_link_state
) {
1993 REG_WR(bp
, BNX2_PCICFG_STATUS_BIT_SET_CMD
, event
);
1995 REG_WR(bp
, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD
, event
);
2003 bnx2_phy_int(struct bnx2
*bp
)
2005 if (bnx2_phy_event_is_set(bp
, STATUS_ATTN_BITS_LINK_STATE
)) {
2006 spin_lock(&bp
->phy_lock
);
2008 spin_unlock(&bp
->phy_lock
);
2013 bnx2_tx_int(struct bnx2
*bp
)
2015 struct status_block
*sblk
= bp
->status_blk
;
2016 u16 hw_cons
, sw_cons
, sw_ring_cons
;
2019 hw_cons
= bp
->hw_tx_cons
= sblk
->status_tx_quick_consumer_index0
;
2020 if ((hw_cons
& MAX_TX_DESC_CNT
) == MAX_TX_DESC_CNT
) {
2023 sw_cons
= bp
->tx_cons
;
2025 while (sw_cons
!= hw_cons
) {
2026 struct sw_bd
*tx_buf
;
2027 struct sk_buff
*skb
;
2030 sw_ring_cons
= TX_RING_IDX(sw_cons
);
2032 tx_buf
= &bp
->tx_buf_ring
[sw_ring_cons
];
2035 /* partial BD completions possible with TSO packets */
2036 if (skb_is_gso(skb
)) {
2037 u16 last_idx
, last_ring_idx
;
2039 last_idx
= sw_cons
+
2040 skb_shinfo(skb
)->nr_frags
+ 1;
2041 last_ring_idx
= sw_ring_cons
+
2042 skb_shinfo(skb
)->nr_frags
+ 1;
2043 if (unlikely(last_ring_idx
>= MAX_TX_DESC_CNT
)) {
2046 if (((s16
) ((s16
) last_idx
- (s16
) hw_cons
)) > 0) {
2051 pci_unmap_single(bp
->pdev
, pci_unmap_addr(tx_buf
, mapping
),
2052 skb_headlen(skb
), PCI_DMA_TODEVICE
);
2055 last
= skb_shinfo(skb
)->nr_frags
;
2057 for (i
= 0; i
< last
; i
++) {
2058 sw_cons
= NEXT_TX_BD(sw_cons
);
2060 pci_unmap_page(bp
->pdev
,
2062 &bp
->tx_buf_ring
[TX_RING_IDX(sw_cons
)],
2064 skb_shinfo(skb
)->frags
[i
].size
,
2068 sw_cons
= NEXT_TX_BD(sw_cons
);
2070 tx_free_bd
+= last
+ 1;
2074 hw_cons
= bp
->hw_tx_cons
=
2075 sblk
->status_tx_quick_consumer_index0
;
2077 if ((hw_cons
& MAX_TX_DESC_CNT
) == MAX_TX_DESC_CNT
) {
2082 bp
->tx_cons
= sw_cons
;
2083 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2084 * before checking for netif_queue_stopped(). Without the
2085 * memory barrier, there is a small possibility that bnx2_start_xmit()
2086 * will miss it and cause the queue to be stopped forever.
2090 if (unlikely(netif_queue_stopped(bp
->dev
)) &&
2091 (bnx2_tx_avail(bp
) > bp
->tx_wake_thresh
)) {
2092 netif_tx_lock(bp
->dev
);
2093 if ((netif_queue_stopped(bp
->dev
)) &&
2094 (bnx2_tx_avail(bp
) > bp
->tx_wake_thresh
))
2095 netif_wake_queue(bp
->dev
);
2096 netif_tx_unlock(bp
->dev
);
2101 bnx2_reuse_rx_skb(struct bnx2
*bp
, struct sk_buff
*skb
,
2104 struct sw_bd
*cons_rx_buf
, *prod_rx_buf
;
2105 struct rx_bd
*cons_bd
, *prod_bd
;
2107 cons_rx_buf
= &bp
->rx_buf_ring
[cons
];
2108 prod_rx_buf
= &bp
->rx_buf_ring
[prod
];
2110 pci_dma_sync_single_for_device(bp
->pdev
,
2111 pci_unmap_addr(cons_rx_buf
, mapping
),
2112 bp
->rx_offset
+ RX_COPY_THRESH
, PCI_DMA_FROMDEVICE
);
2114 bp
->rx_prod_bseq
+= bp
->rx_buf_use_size
;
2116 prod_rx_buf
->skb
= skb
;
2121 pci_unmap_addr_set(prod_rx_buf
, mapping
,
2122 pci_unmap_addr(cons_rx_buf
, mapping
));
2124 cons_bd
= &bp
->rx_desc_ring
[RX_RING(cons
)][RX_IDX(cons
)];
2125 prod_bd
= &bp
->rx_desc_ring
[RX_RING(prod
)][RX_IDX(prod
)];
2126 prod_bd
->rx_bd_haddr_hi
= cons_bd
->rx_bd_haddr_hi
;
2127 prod_bd
->rx_bd_haddr_lo
= cons_bd
->rx_bd_haddr_lo
;
2131 bnx2_rx_int(struct bnx2
*bp
, int budget
)
2133 struct status_block
*sblk
= bp
->status_blk
;
2134 u16 hw_cons
, sw_cons
, sw_ring_cons
, sw_prod
, sw_ring_prod
;
2135 struct l2_fhdr
*rx_hdr
;
2138 hw_cons
= bp
->hw_rx_cons
= sblk
->status_rx_quick_consumer_index0
;
2139 if ((hw_cons
& MAX_RX_DESC_CNT
) == MAX_RX_DESC_CNT
) {
2142 sw_cons
= bp
->rx_cons
;
2143 sw_prod
= bp
->rx_prod
;
2145 /* Memory barrier necessary as speculative reads of the rx
2146 * buffer can be ahead of the index in the status block
2149 while (sw_cons
!= hw_cons
) {
2152 struct sw_bd
*rx_buf
;
2153 struct sk_buff
*skb
;
2154 dma_addr_t dma_addr
;
2156 sw_ring_cons
= RX_RING_IDX(sw_cons
);
2157 sw_ring_prod
= RX_RING_IDX(sw_prod
);
2159 rx_buf
= &bp
->rx_buf_ring
[sw_ring_cons
];
2164 dma_addr
= pci_unmap_addr(rx_buf
, mapping
);
2166 pci_dma_sync_single_for_cpu(bp
->pdev
, dma_addr
,
2167 bp
->rx_offset
+ RX_COPY_THRESH
, PCI_DMA_FROMDEVICE
);
2169 rx_hdr
= (struct l2_fhdr
*) skb
->data
;
2170 len
= rx_hdr
->l2_fhdr_pkt_len
- 4;
2172 if ((status
= rx_hdr
->l2_fhdr_status
) &
2173 (L2_FHDR_ERRORS_BAD_CRC
|
2174 L2_FHDR_ERRORS_PHY_DECODE
|
2175 L2_FHDR_ERRORS_ALIGNMENT
|
2176 L2_FHDR_ERRORS_TOO_SHORT
|
2177 L2_FHDR_ERRORS_GIANT_FRAME
)) {
2182 /* Since we don't have a jumbo ring, copy small packets
2185 if ((bp
->dev
->mtu
> 1500) && (len
<= RX_COPY_THRESH
)) {
2186 struct sk_buff
*new_skb
;
2188 new_skb
= netdev_alloc_skb(bp
->dev
, len
+ 2);
2189 if (new_skb
== NULL
)
2193 skb_copy_from_linear_data_offset(skb
, bp
->rx_offset
- 2,
2194 new_skb
->data
, len
+ 2);
2195 skb_reserve(new_skb
, 2);
2196 skb_put(new_skb
, len
);
2198 bnx2_reuse_rx_skb(bp
, skb
,
2199 sw_ring_cons
, sw_ring_prod
);
2203 else if (bnx2_alloc_rx_skb(bp
, sw_ring_prod
) == 0) {
2204 pci_unmap_single(bp
->pdev
, dma_addr
,
2205 bp
->rx_buf_use_size
, PCI_DMA_FROMDEVICE
);
2207 skb_reserve(skb
, bp
->rx_offset
);
2212 bnx2_reuse_rx_skb(bp
, skb
,
2213 sw_ring_cons
, sw_ring_prod
);
2217 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
2219 if ((len
> (bp
->dev
->mtu
+ ETH_HLEN
)) &&
2220 (ntohs(skb
->protocol
) != 0x8100)) {
2227 skb
->ip_summed
= CHECKSUM_NONE
;
2229 (status
& (L2_FHDR_STATUS_TCP_SEGMENT
|
2230 L2_FHDR_STATUS_UDP_DATAGRAM
))) {
2232 if (likely((status
& (L2_FHDR_ERRORS_TCP_XSUM
|
2233 L2_FHDR_ERRORS_UDP_XSUM
)) == 0))
2234 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2238 if ((status
& L2_FHDR_STATUS_L2_VLAN_TAG
) && (bp
->vlgrp
!= 0)) {
2239 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
,
2240 rx_hdr
->l2_fhdr_vlan_tag
);
2244 netif_receive_skb(skb
);
2246 bp
->dev
->last_rx
= jiffies
;
2250 sw_cons
= NEXT_RX_BD(sw_cons
);
2251 sw_prod
= NEXT_RX_BD(sw_prod
);
2253 if ((rx_pkt
== budget
))
2256 /* Refresh hw_cons to see if there is new work */
2257 if (sw_cons
== hw_cons
) {
2258 hw_cons
= bp
->hw_rx_cons
=
2259 sblk
->status_rx_quick_consumer_index0
;
2260 if ((hw_cons
& MAX_RX_DESC_CNT
) == MAX_RX_DESC_CNT
)
2265 bp
->rx_cons
= sw_cons
;
2266 bp
->rx_prod
= sw_prod
;
2268 REG_WR16(bp
, MB_RX_CID_ADDR
+ BNX2_L2CTX_HOST_BDIDX
, sw_prod
);
2270 REG_WR(bp
, MB_RX_CID_ADDR
+ BNX2_L2CTX_HOST_BSEQ
, bp
->rx_prod_bseq
);
2278 /* MSI ISR - The only difference between this and the INTx ISR
2279 * is that the MSI interrupt is always serviced.
2282 bnx2_msi(int irq
, void *dev_instance
)
2284 struct net_device
*dev
= dev_instance
;
2285 struct bnx2
*bp
= netdev_priv(dev
);
2287 prefetch(bp
->status_blk
);
2288 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
2289 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM
|
2290 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
2292 /* Return here if interrupt is disabled. */
2293 if (unlikely(atomic_read(&bp
->intr_sem
) != 0))
2296 netif_rx_schedule(dev
);
2302 bnx2_msi_1shot(int irq
, void *dev_instance
)
2304 struct net_device
*dev
= dev_instance
;
2305 struct bnx2
*bp
= netdev_priv(dev
);
2307 prefetch(bp
->status_blk
);
2309 /* Return here if interrupt is disabled. */
2310 if (unlikely(atomic_read(&bp
->intr_sem
) != 0))
2313 netif_rx_schedule(dev
);
2319 bnx2_interrupt(int irq
, void *dev_instance
)
2321 struct net_device
*dev
= dev_instance
;
2322 struct bnx2
*bp
= netdev_priv(dev
);
2324 /* When using INTx, it is possible for the interrupt to arrive
2325 * at the CPU before the status block posted prior to the
2326 * interrupt. Reading a register will flush the status block.
2327 * When using MSI, the MSI message will always complete after
2328 * the status block write.
2330 if ((bp
->status_blk
->status_idx
== bp
->last_status_idx
) &&
2331 (REG_RD(bp
, BNX2_PCICFG_MISC_STATUS
) &
2332 BNX2_PCICFG_MISC_STATUS_INTA_VALUE
))
2335 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
2336 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM
|
2337 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
2339 /* Return here if interrupt is shared and is disabled. */
2340 if (unlikely(atomic_read(&bp
->intr_sem
) != 0))
2343 netif_rx_schedule(dev
);
2348 #define STATUS_ATTN_EVENTS STATUS_ATTN_BITS_LINK_STATE
2351 bnx2_has_work(struct bnx2
*bp
)
2353 struct status_block
*sblk
= bp
->status_blk
;
2355 if ((sblk
->status_rx_quick_consumer_index0
!= bp
->hw_rx_cons
) ||
2356 (sblk
->status_tx_quick_consumer_index0
!= bp
->hw_tx_cons
))
2359 if ((sblk
->status_attn_bits
& STATUS_ATTN_EVENTS
) !=
2360 (sblk
->status_attn_bits_ack
& STATUS_ATTN_EVENTS
))
2367 bnx2_poll(struct net_device
*dev
, int *budget
)
2369 struct bnx2
*bp
= netdev_priv(dev
);
2370 struct status_block
*sblk
= bp
->status_blk
;
2371 u32 status_attn_bits
= sblk
->status_attn_bits
;
2372 u32 status_attn_bits_ack
= sblk
->status_attn_bits_ack
;
2374 if ((status_attn_bits
& STATUS_ATTN_EVENTS
) !=
2375 (status_attn_bits_ack
& STATUS_ATTN_EVENTS
)) {
2379 /* This is needed to take care of transient status
2380 * during link changes.
2382 REG_WR(bp
, BNX2_HC_COMMAND
,
2383 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
2384 REG_RD(bp
, BNX2_HC_COMMAND
);
2387 if (bp
->status_blk
->status_tx_quick_consumer_index0
!= bp
->hw_tx_cons
)
2390 if (bp
->status_blk
->status_rx_quick_consumer_index0
!= bp
->hw_rx_cons
) {
2391 int orig_budget
= *budget
;
2394 if (orig_budget
> dev
->quota
)
2395 orig_budget
= dev
->quota
;
2397 work_done
= bnx2_rx_int(bp
, orig_budget
);
2398 *budget
-= work_done
;
2399 dev
->quota
-= work_done
;
2402 bp
->last_status_idx
= bp
->status_blk
->status_idx
;
2405 if (!bnx2_has_work(bp
)) {
2406 netif_rx_complete(dev
);
2407 if (likely(bp
->flags
& USING_MSI_FLAG
)) {
2408 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
2409 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
2410 bp
->last_status_idx
);
2413 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
2414 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
2415 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
|
2416 bp
->last_status_idx
);
2418 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
2419 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
2420 bp
->last_status_idx
);
2427 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2428 * from set_multicast.
2431 bnx2_set_rx_mode(struct net_device
*dev
)
2433 struct bnx2
*bp
= netdev_priv(dev
);
2434 u32 rx_mode
, sort_mode
;
2437 spin_lock_bh(&bp
->phy_lock
);
2439 rx_mode
= bp
->rx_mode
& ~(BNX2_EMAC_RX_MODE_PROMISCUOUS
|
2440 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
);
2441 sort_mode
= 1 | BNX2_RPM_SORT_USER0_BC_EN
;
2443 if (!bp
->vlgrp
&& !(bp
->flags
& ASF_ENABLE_FLAG
))
2444 rx_mode
|= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
;
2446 if (!(bp
->flags
& ASF_ENABLE_FLAG
))
2447 rx_mode
|= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
;
2449 if (dev
->flags
& IFF_PROMISC
) {
2450 /* Promiscuous mode. */
2451 rx_mode
|= BNX2_EMAC_RX_MODE_PROMISCUOUS
;
2452 sort_mode
|= BNX2_RPM_SORT_USER0_PROM_EN
|
2453 BNX2_RPM_SORT_USER0_PROM_VLAN
;
2455 else if (dev
->flags
& IFF_ALLMULTI
) {
2456 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
2457 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
2460 sort_mode
|= BNX2_RPM_SORT_USER0_MC_EN
;
2463 /* Accept one or more multicast(s). */
2464 struct dev_mc_list
*mclist
;
2465 u32 mc_filter
[NUM_MC_HASH_REGISTERS
];
2470 memset(mc_filter
, 0, 4 * NUM_MC_HASH_REGISTERS
);
2472 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
2473 i
++, mclist
= mclist
->next
) {
2475 crc
= ether_crc_le(ETH_ALEN
, mclist
->dmi_addr
);
2477 regidx
= (bit
& 0xe0) >> 5;
2479 mc_filter
[regidx
] |= (1 << bit
);
2482 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
2483 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
2487 sort_mode
|= BNX2_RPM_SORT_USER0_MC_HSH_EN
;
2490 if (rx_mode
!= bp
->rx_mode
) {
2491 bp
->rx_mode
= rx_mode
;
2492 REG_WR(bp
, BNX2_EMAC_RX_MODE
, rx_mode
);
2495 REG_WR(bp
, BNX2_RPM_SORT_USER0
, 0x0);
2496 REG_WR(bp
, BNX2_RPM_SORT_USER0
, sort_mode
);
2497 REG_WR(bp
, BNX2_RPM_SORT_USER0
, sort_mode
| BNX2_RPM_SORT_USER0_ENA
);
2499 spin_unlock_bh(&bp
->phy_lock
);
2502 #define FW_BUF_SIZE 0x8000
2505 bnx2_gunzip_init(struct bnx2
*bp
)
2507 if ((bp
->gunzip_buf
= vmalloc(FW_BUF_SIZE
)) == NULL
)
2510 if ((bp
->strm
= kmalloc(sizeof(*bp
->strm
), GFP_KERNEL
)) == NULL
)
2513 bp
->strm
->workspace
= kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL
);
2514 if (bp
->strm
->workspace
== NULL
)
2524 vfree(bp
->gunzip_buf
);
2525 bp
->gunzip_buf
= NULL
;
2528 printk(KERN_ERR PFX
"%s: Cannot allocate firmware buffer for "
2529 "uncompression.\n", bp
->dev
->name
);
2534 bnx2_gunzip_end(struct bnx2
*bp
)
2536 kfree(bp
->strm
->workspace
);
2541 if (bp
->gunzip_buf
) {
2542 vfree(bp
->gunzip_buf
);
2543 bp
->gunzip_buf
= NULL
;
2548 bnx2_gunzip(struct bnx2
*bp
, u8
*zbuf
, int len
, void **outbuf
, int *outlen
)
2552 /* check gzip header */
2553 if ((zbuf
[0] != 0x1f) || (zbuf
[1] != 0x8b) || (zbuf
[2] != Z_DEFLATED
))
2559 if (zbuf
[3] & FNAME
)
2560 while ((zbuf
[n
++] != 0) && (n
< len
));
2562 bp
->strm
->next_in
= zbuf
+ n
;
2563 bp
->strm
->avail_in
= len
- n
;
2564 bp
->strm
->next_out
= bp
->gunzip_buf
;
2565 bp
->strm
->avail_out
= FW_BUF_SIZE
;
2567 rc
= zlib_inflateInit2(bp
->strm
, -MAX_WBITS
);
2571 rc
= zlib_inflate(bp
->strm
, Z_FINISH
);
2573 *outlen
= FW_BUF_SIZE
- bp
->strm
->avail_out
;
2574 *outbuf
= bp
->gunzip_buf
;
2576 if ((rc
!= Z_OK
) && (rc
!= Z_STREAM_END
))
2577 printk(KERN_ERR PFX
"%s: Firmware decompression error: %s\n",
2578 bp
->dev
->name
, bp
->strm
->msg
);
2580 zlib_inflateEnd(bp
->strm
);
2582 if (rc
== Z_STREAM_END
)
2589 load_rv2p_fw(struct bnx2
*bp
, u32
*rv2p_code
, u32 rv2p_code_len
,
2596 for (i
= 0; i
< rv2p_code_len
; i
+= 8) {
2597 REG_WR(bp
, BNX2_RV2P_INSTR_HIGH
, cpu_to_le32(*rv2p_code
));
2599 REG_WR(bp
, BNX2_RV2P_INSTR_LOW
, cpu_to_le32(*rv2p_code
));
2602 if (rv2p_proc
== RV2P_PROC1
) {
2603 val
= (i
/ 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR
;
2604 REG_WR(bp
, BNX2_RV2P_PROC1_ADDR_CMD
, val
);
2607 val
= (i
/ 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR
;
2608 REG_WR(bp
, BNX2_RV2P_PROC2_ADDR_CMD
, val
);
2612 /* Reset the processor, un-stall is done later. */
2613 if (rv2p_proc
== RV2P_PROC1
) {
2614 REG_WR(bp
, BNX2_RV2P_COMMAND
, BNX2_RV2P_COMMAND_PROC1_RESET
);
2617 REG_WR(bp
, BNX2_RV2P_COMMAND
, BNX2_RV2P_COMMAND_PROC2_RESET
);
2622 load_cpu_fw(struct bnx2
*bp
, struct cpu_reg
*cpu_reg
, struct fw_info
*fw
)
2629 val
= REG_RD_IND(bp
, cpu_reg
->mode
);
2630 val
|= cpu_reg
->mode_value_halt
;
2631 REG_WR_IND(bp
, cpu_reg
->mode
, val
);
2632 REG_WR_IND(bp
, cpu_reg
->state
, cpu_reg
->state_value_clear
);
2634 /* Load the Text area. */
2635 offset
= cpu_reg
->spad_base
+ (fw
->text_addr
- cpu_reg
->mips_view_base
);
2640 rc
= bnx2_gunzip(bp
, fw
->gz_text
, fw
->gz_text_len
, &text
,
2650 for (j
= 0; j
< (fw
->text_len
/ 4); j
++, offset
+= 4) {
2651 REG_WR_IND(bp
, offset
, cpu_to_le32(fw
->text
[j
]));
2655 /* Load the Data area. */
2656 offset
= cpu_reg
->spad_base
+ (fw
->data_addr
- cpu_reg
->mips_view_base
);
2660 for (j
= 0; j
< (fw
->data_len
/ 4); j
++, offset
+= 4) {
2661 REG_WR_IND(bp
, offset
, fw
->data
[j
]);
2665 /* Load the SBSS area. */
2666 offset
= cpu_reg
->spad_base
+ (fw
->sbss_addr
- cpu_reg
->mips_view_base
);
2670 for (j
= 0; j
< (fw
->sbss_len
/ 4); j
++, offset
+= 4) {
2671 REG_WR_IND(bp
, offset
, fw
->sbss
[j
]);
2675 /* Load the BSS area. */
2676 offset
= cpu_reg
->spad_base
+ (fw
->bss_addr
- cpu_reg
->mips_view_base
);
2680 for (j
= 0; j
< (fw
->bss_len
/4); j
++, offset
+= 4) {
2681 REG_WR_IND(bp
, offset
, fw
->bss
[j
]);
2685 /* Load the Read-Only area. */
2686 offset
= cpu_reg
->spad_base
+
2687 (fw
->rodata_addr
- cpu_reg
->mips_view_base
);
2691 for (j
= 0; j
< (fw
->rodata_len
/ 4); j
++, offset
+= 4) {
2692 REG_WR_IND(bp
, offset
, fw
->rodata
[j
]);
2696 /* Clear the pre-fetch instruction. */
2697 REG_WR_IND(bp
, cpu_reg
->inst
, 0);
2698 REG_WR_IND(bp
, cpu_reg
->pc
, fw
->start_addr
);
2700 /* Start the CPU. */
2701 val
= REG_RD_IND(bp
, cpu_reg
->mode
);
2702 val
&= ~cpu_reg
->mode_value_halt
;
2703 REG_WR_IND(bp
, cpu_reg
->state
, cpu_reg
->state_value_clear
);
2704 REG_WR_IND(bp
, cpu_reg
->mode
, val
);
2710 bnx2_init_cpus(struct bnx2
*bp
)
2712 struct cpu_reg cpu_reg
;
2718 if ((rc
= bnx2_gunzip_init(bp
)) != 0)
2721 /* Initialize the RV2P processor. */
2722 rc
= bnx2_gunzip(bp
, bnx2_rv2p_proc1
, sizeof(bnx2_rv2p_proc1
), &text
,
2727 load_rv2p_fw(bp
, text
, text_len
, RV2P_PROC1
);
2729 rc
= bnx2_gunzip(bp
, bnx2_rv2p_proc2
, sizeof(bnx2_rv2p_proc2
), &text
,
2734 load_rv2p_fw(bp
, text
, text_len
, RV2P_PROC2
);
2736 /* Initialize the RX Processor. */
2737 cpu_reg
.mode
= BNX2_RXP_CPU_MODE
;
2738 cpu_reg
.mode_value_halt
= BNX2_RXP_CPU_MODE_SOFT_HALT
;
2739 cpu_reg
.mode_value_sstep
= BNX2_RXP_CPU_MODE_STEP_ENA
;
2740 cpu_reg
.state
= BNX2_RXP_CPU_STATE
;
2741 cpu_reg
.state_value_clear
= 0xffffff;
2742 cpu_reg
.gpr0
= BNX2_RXP_CPU_REG_FILE
;
2743 cpu_reg
.evmask
= BNX2_RXP_CPU_EVENT_MASK
;
2744 cpu_reg
.pc
= BNX2_RXP_CPU_PROGRAM_COUNTER
;
2745 cpu_reg
.inst
= BNX2_RXP_CPU_INSTRUCTION
;
2746 cpu_reg
.bp
= BNX2_RXP_CPU_HW_BREAKPOINT
;
2747 cpu_reg
.spad_base
= BNX2_RXP_SCRATCH
;
2748 cpu_reg
.mips_view_base
= 0x8000000;
2750 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
2751 fw
= &bnx2_rxp_fw_09
;
2753 fw
= &bnx2_rxp_fw_06
;
2755 rc
= load_cpu_fw(bp
, &cpu_reg
, fw
);
2759 /* Initialize the TX Processor. */
2760 cpu_reg
.mode
= BNX2_TXP_CPU_MODE
;
2761 cpu_reg
.mode_value_halt
= BNX2_TXP_CPU_MODE_SOFT_HALT
;
2762 cpu_reg
.mode_value_sstep
= BNX2_TXP_CPU_MODE_STEP_ENA
;
2763 cpu_reg
.state
= BNX2_TXP_CPU_STATE
;
2764 cpu_reg
.state_value_clear
= 0xffffff;
2765 cpu_reg
.gpr0
= BNX2_TXP_CPU_REG_FILE
;
2766 cpu_reg
.evmask
= BNX2_TXP_CPU_EVENT_MASK
;
2767 cpu_reg
.pc
= BNX2_TXP_CPU_PROGRAM_COUNTER
;
2768 cpu_reg
.inst
= BNX2_TXP_CPU_INSTRUCTION
;
2769 cpu_reg
.bp
= BNX2_TXP_CPU_HW_BREAKPOINT
;
2770 cpu_reg
.spad_base
= BNX2_TXP_SCRATCH
;
2771 cpu_reg
.mips_view_base
= 0x8000000;
2773 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
2774 fw
= &bnx2_txp_fw_09
;
2776 fw
= &bnx2_txp_fw_06
;
2778 rc
= load_cpu_fw(bp
, &cpu_reg
, fw
);
2782 /* Initialize the TX Patch-up Processor. */
2783 cpu_reg
.mode
= BNX2_TPAT_CPU_MODE
;
2784 cpu_reg
.mode_value_halt
= BNX2_TPAT_CPU_MODE_SOFT_HALT
;
2785 cpu_reg
.mode_value_sstep
= BNX2_TPAT_CPU_MODE_STEP_ENA
;
2786 cpu_reg
.state
= BNX2_TPAT_CPU_STATE
;
2787 cpu_reg
.state_value_clear
= 0xffffff;
2788 cpu_reg
.gpr0
= BNX2_TPAT_CPU_REG_FILE
;
2789 cpu_reg
.evmask
= BNX2_TPAT_CPU_EVENT_MASK
;
2790 cpu_reg
.pc
= BNX2_TPAT_CPU_PROGRAM_COUNTER
;
2791 cpu_reg
.inst
= BNX2_TPAT_CPU_INSTRUCTION
;
2792 cpu_reg
.bp
= BNX2_TPAT_CPU_HW_BREAKPOINT
;
2793 cpu_reg
.spad_base
= BNX2_TPAT_SCRATCH
;
2794 cpu_reg
.mips_view_base
= 0x8000000;
2796 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
2797 fw
= &bnx2_tpat_fw_09
;
2799 fw
= &bnx2_tpat_fw_06
;
2801 rc
= load_cpu_fw(bp
, &cpu_reg
, fw
);
2805 /* Initialize the Completion Processor. */
2806 cpu_reg
.mode
= BNX2_COM_CPU_MODE
;
2807 cpu_reg
.mode_value_halt
= BNX2_COM_CPU_MODE_SOFT_HALT
;
2808 cpu_reg
.mode_value_sstep
= BNX2_COM_CPU_MODE_STEP_ENA
;
2809 cpu_reg
.state
= BNX2_COM_CPU_STATE
;
2810 cpu_reg
.state_value_clear
= 0xffffff;
2811 cpu_reg
.gpr0
= BNX2_COM_CPU_REG_FILE
;
2812 cpu_reg
.evmask
= BNX2_COM_CPU_EVENT_MASK
;
2813 cpu_reg
.pc
= BNX2_COM_CPU_PROGRAM_COUNTER
;
2814 cpu_reg
.inst
= BNX2_COM_CPU_INSTRUCTION
;
2815 cpu_reg
.bp
= BNX2_COM_CPU_HW_BREAKPOINT
;
2816 cpu_reg
.spad_base
= BNX2_COM_SCRATCH
;
2817 cpu_reg
.mips_view_base
= 0x8000000;
2819 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
2820 fw
= &bnx2_com_fw_09
;
2822 fw
= &bnx2_com_fw_06
;
2824 rc
= load_cpu_fw(bp
, &cpu_reg
, fw
);
2828 /* Initialize the Command Processor. */
2829 cpu_reg
.mode
= BNX2_CP_CPU_MODE
;
2830 cpu_reg
.mode_value_halt
= BNX2_CP_CPU_MODE_SOFT_HALT
;
2831 cpu_reg
.mode_value_sstep
= BNX2_CP_CPU_MODE_STEP_ENA
;
2832 cpu_reg
.state
= BNX2_CP_CPU_STATE
;
2833 cpu_reg
.state_value_clear
= 0xffffff;
2834 cpu_reg
.gpr0
= BNX2_CP_CPU_REG_FILE
;
2835 cpu_reg
.evmask
= BNX2_CP_CPU_EVENT_MASK
;
2836 cpu_reg
.pc
= BNX2_CP_CPU_PROGRAM_COUNTER
;
2837 cpu_reg
.inst
= BNX2_CP_CPU_INSTRUCTION
;
2838 cpu_reg
.bp
= BNX2_CP_CPU_HW_BREAKPOINT
;
2839 cpu_reg
.spad_base
= BNX2_CP_SCRATCH
;
2840 cpu_reg
.mips_view_base
= 0x8000000;
2842 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
2843 fw
= &bnx2_cp_fw_09
;
2845 rc
= load_cpu_fw(bp
, &cpu_reg
, fw
);
2850 bnx2_gunzip_end(bp
);
2855 bnx2_set_power_state(struct bnx2
*bp
, pci_power_t state
)
2859 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
2865 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
2866 (pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
2867 PCI_PM_CTRL_PME_STATUS
);
2869 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
2870 /* delay required during transition out of D3hot */
2873 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
2874 val
|= BNX2_EMAC_MODE_MPKT_RCVD
| BNX2_EMAC_MODE_ACPI_RCVD
;
2875 val
&= ~BNX2_EMAC_MODE_MPKT
;
2876 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
2878 val
= REG_RD(bp
, BNX2_RPM_CONFIG
);
2879 val
&= ~BNX2_RPM_CONFIG_ACPI_ENA
;
2880 REG_WR(bp
, BNX2_RPM_CONFIG
, val
);
2891 autoneg
= bp
->autoneg
;
2892 advertising
= bp
->advertising
;
2894 bp
->autoneg
= AUTONEG_SPEED
;
2895 bp
->advertising
= ADVERTISED_10baseT_Half
|
2896 ADVERTISED_10baseT_Full
|
2897 ADVERTISED_100baseT_Half
|
2898 ADVERTISED_100baseT_Full
|
2901 bnx2_setup_copper_phy(bp
);
2903 bp
->autoneg
= autoneg
;
2904 bp
->advertising
= advertising
;
2906 bnx2_set_mac_addr(bp
);
2908 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
2910 /* Enable port mode. */
2911 val
&= ~BNX2_EMAC_MODE_PORT
;
2912 val
|= BNX2_EMAC_MODE_PORT_MII
|
2913 BNX2_EMAC_MODE_MPKT_RCVD
|
2914 BNX2_EMAC_MODE_ACPI_RCVD
|
2915 BNX2_EMAC_MODE_MPKT
;
2917 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
2919 /* receive all multicast */
2920 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
2921 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
2924 REG_WR(bp
, BNX2_EMAC_RX_MODE
,
2925 BNX2_EMAC_RX_MODE_SORT_MODE
);
2927 val
= 1 | BNX2_RPM_SORT_USER0_BC_EN
|
2928 BNX2_RPM_SORT_USER0_MC_EN
;
2929 REG_WR(bp
, BNX2_RPM_SORT_USER0
, 0x0);
2930 REG_WR(bp
, BNX2_RPM_SORT_USER0
, val
);
2931 REG_WR(bp
, BNX2_RPM_SORT_USER0
, val
|
2932 BNX2_RPM_SORT_USER0_ENA
);
2934 /* Need to enable EMAC and RPM for WOL. */
2935 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
2936 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE
|
2937 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE
|
2938 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE
);
2940 val
= REG_RD(bp
, BNX2_RPM_CONFIG
);
2941 val
&= ~BNX2_RPM_CONFIG_ACPI_ENA
;
2942 REG_WR(bp
, BNX2_RPM_CONFIG
, val
);
2944 wol_msg
= BNX2_DRV_MSG_CODE_SUSPEND_WOL
;
2947 wol_msg
= BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL
;
2950 if (!(bp
->flags
& NO_WOL_FLAG
))
2951 bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT3
| wol_msg
, 0);
2953 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
2954 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
2955 (CHIP_ID(bp
) == CHIP_ID_5706_A1
)) {
2964 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
2966 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
2969 /* No more memory access after this point until
2970 * device is brought back to D0.
2982 bnx2_acquire_nvram_lock(struct bnx2
*bp
)
2987 /* Request access to the flash interface. */
2988 REG_WR(bp
, BNX2_NVM_SW_ARB
, BNX2_NVM_SW_ARB_ARB_REQ_SET2
);
2989 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
2990 val
= REG_RD(bp
, BNX2_NVM_SW_ARB
);
2991 if (val
& BNX2_NVM_SW_ARB_ARB_ARB2
)
2997 if (j
>= NVRAM_TIMEOUT_COUNT
)
3004 bnx2_release_nvram_lock(struct bnx2
*bp
)
3009 /* Relinquish nvram interface. */
3010 REG_WR(bp
, BNX2_NVM_SW_ARB
, BNX2_NVM_SW_ARB_ARB_REQ_CLR2
);
3012 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
3013 val
= REG_RD(bp
, BNX2_NVM_SW_ARB
);
3014 if (!(val
& BNX2_NVM_SW_ARB_ARB_ARB2
))
3020 if (j
>= NVRAM_TIMEOUT_COUNT
)
3028 bnx2_enable_nvram_write(struct bnx2
*bp
)
3032 val
= REG_RD(bp
, BNX2_MISC_CFG
);
3033 REG_WR(bp
, BNX2_MISC_CFG
, val
| BNX2_MISC_CFG_NVM_WR_EN_PCI
);
3035 if (!bp
->flash_info
->buffered
) {
3038 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
3039 REG_WR(bp
, BNX2_NVM_COMMAND
,
3040 BNX2_NVM_COMMAND_WREN
| BNX2_NVM_COMMAND_DOIT
);
3042 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
3045 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
3046 if (val
& BNX2_NVM_COMMAND_DONE
)
3050 if (j
>= NVRAM_TIMEOUT_COUNT
)
3057 bnx2_disable_nvram_write(struct bnx2
*bp
)
3061 val
= REG_RD(bp
, BNX2_MISC_CFG
);
3062 REG_WR(bp
, BNX2_MISC_CFG
, val
& ~BNX2_MISC_CFG_NVM_WR_EN
);
3067 bnx2_enable_nvram_access(struct bnx2
*bp
)
3071 val
= REG_RD(bp
, BNX2_NVM_ACCESS_ENABLE
);
3072 /* Enable both bits, even on read. */
3073 REG_WR(bp
, BNX2_NVM_ACCESS_ENABLE
,
3074 val
| BNX2_NVM_ACCESS_ENABLE_EN
| BNX2_NVM_ACCESS_ENABLE_WR_EN
);
3078 bnx2_disable_nvram_access(struct bnx2
*bp
)
3082 val
= REG_RD(bp
, BNX2_NVM_ACCESS_ENABLE
);
3083 /* Disable both bits, even after read. */
3084 REG_WR(bp
, BNX2_NVM_ACCESS_ENABLE
,
3085 val
& ~(BNX2_NVM_ACCESS_ENABLE_EN
|
3086 BNX2_NVM_ACCESS_ENABLE_WR_EN
));
3090 bnx2_nvram_erase_page(struct bnx2
*bp
, u32 offset
)
3095 if (bp
->flash_info
->buffered
)
3096 /* Buffered flash, no erase needed */
3099 /* Build an erase command */
3100 cmd
= BNX2_NVM_COMMAND_ERASE
| BNX2_NVM_COMMAND_WR
|
3101 BNX2_NVM_COMMAND_DOIT
;
3103 /* Need to clear DONE bit separately. */
3104 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
3106 /* Address of the NVRAM to read from. */
3107 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
3109 /* Issue an erase command. */
3110 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
3112 /* Wait for completion. */
3113 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
3118 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
3119 if (val
& BNX2_NVM_COMMAND_DONE
)
3123 if (j
>= NVRAM_TIMEOUT_COUNT
)
3130 bnx2_nvram_read_dword(struct bnx2
*bp
, u32 offset
, u8
*ret_val
, u32 cmd_flags
)
3135 /* Build the command word. */
3136 cmd
= BNX2_NVM_COMMAND_DOIT
| cmd_flags
;
3138 /* Calculate an offset of a buffered flash. */
3139 if (bp
->flash_info
->buffered
) {
3140 offset
= ((offset
/ bp
->flash_info
->page_size
) <<
3141 bp
->flash_info
->page_bits
) +
3142 (offset
% bp
->flash_info
->page_size
);
3145 /* Need to clear DONE bit separately. */
3146 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
3148 /* Address of the NVRAM to read from. */
3149 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
3151 /* Issue a read command. */
3152 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
3154 /* Wait for completion. */
3155 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
3160 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
3161 if (val
& BNX2_NVM_COMMAND_DONE
) {
3162 val
= REG_RD(bp
, BNX2_NVM_READ
);
3164 val
= be32_to_cpu(val
);
3165 memcpy(ret_val
, &val
, 4);
3169 if (j
>= NVRAM_TIMEOUT_COUNT
)
3177 bnx2_nvram_write_dword(struct bnx2
*bp
, u32 offset
, u8
*val
, u32 cmd_flags
)
3182 /* Build the command word. */
3183 cmd
= BNX2_NVM_COMMAND_DOIT
| BNX2_NVM_COMMAND_WR
| cmd_flags
;
3185 /* Calculate an offset of a buffered flash. */
3186 if (bp
->flash_info
->buffered
) {
3187 offset
= ((offset
/ bp
->flash_info
->page_size
) <<
3188 bp
->flash_info
->page_bits
) +
3189 (offset
% bp
->flash_info
->page_size
);
3192 /* Need to clear DONE bit separately. */
3193 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
3195 memcpy(&val32
, val
, 4);
3196 val32
= cpu_to_be32(val32
);
3198 /* Write the data. */
3199 REG_WR(bp
, BNX2_NVM_WRITE
, val32
);
3201 /* Address of the NVRAM to write to. */
3202 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
3204 /* Issue the write command. */
3205 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
3207 /* Wait for completion. */
3208 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
3211 if (REG_RD(bp
, BNX2_NVM_COMMAND
) & BNX2_NVM_COMMAND_DONE
)
3214 if (j
>= NVRAM_TIMEOUT_COUNT
)
3221 bnx2_init_nvram(struct bnx2
*bp
)
3224 int j
, entry_count
, rc
;
3225 struct flash_spec
*flash
;
3227 /* Determine the selected interface. */
3228 val
= REG_RD(bp
, BNX2_NVM_CFG1
);
3230 entry_count
= sizeof(flash_table
) / sizeof(struct flash_spec
);
3233 if (val
& 0x40000000) {
3235 /* Flash interface has been reconfigured */
3236 for (j
= 0, flash
= &flash_table
[0]; j
< entry_count
;
3238 if ((val
& FLASH_BACKUP_STRAP_MASK
) ==
3239 (flash
->config1
& FLASH_BACKUP_STRAP_MASK
)) {
3240 bp
->flash_info
= flash
;
3247 /* Not yet been reconfigured */
3249 if (val
& (1 << 23))
3250 mask
= FLASH_BACKUP_STRAP_MASK
;
3252 mask
= FLASH_STRAP_MASK
;
3254 for (j
= 0, flash
= &flash_table
[0]; j
< entry_count
;
3257 if ((val
& mask
) == (flash
->strapping
& mask
)) {
3258 bp
->flash_info
= flash
;
3260 /* Request access to the flash interface. */
3261 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
3264 /* Enable access to flash interface */
3265 bnx2_enable_nvram_access(bp
);
3267 /* Reconfigure the flash interface */
3268 REG_WR(bp
, BNX2_NVM_CFG1
, flash
->config1
);
3269 REG_WR(bp
, BNX2_NVM_CFG2
, flash
->config2
);
3270 REG_WR(bp
, BNX2_NVM_CFG3
, flash
->config3
);
3271 REG_WR(bp
, BNX2_NVM_WRITE1
, flash
->write1
);
3273 /* Disable access to flash interface */
3274 bnx2_disable_nvram_access(bp
);
3275 bnx2_release_nvram_lock(bp
);
3280 } /* if (val & 0x40000000) */
3282 if (j
== entry_count
) {
3283 bp
->flash_info
= NULL
;
3284 printk(KERN_ALERT PFX
"Unknown flash/EEPROM type.\n");
3288 val
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_SHARED_HW_CFG_CONFIG2
);
3289 val
&= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK
;
3291 bp
->flash_size
= val
;
3293 bp
->flash_size
= bp
->flash_info
->total_size
;
3299 bnx2_nvram_read(struct bnx2
*bp
, u32 offset
, u8
*ret_buf
,
3303 u32 cmd_flags
, offset32
, len32
, extra
;
3308 /* Request access to the flash interface. */
3309 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
3312 /* Enable access to flash interface */
3313 bnx2_enable_nvram_access(bp
);
3326 pre_len
= 4 - (offset
& 3);
3328 if (pre_len
>= len32
) {
3330 cmd_flags
= BNX2_NVM_COMMAND_FIRST
|
3331 BNX2_NVM_COMMAND_LAST
;
3334 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
3337 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
3342 memcpy(ret_buf
, buf
+ (offset
& 3), pre_len
);
3349 extra
= 4 - (len32
& 3);
3350 len32
= (len32
+ 4) & ~3;
3357 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
3359 cmd_flags
= BNX2_NVM_COMMAND_FIRST
|
3360 BNX2_NVM_COMMAND_LAST
;
3362 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
3364 memcpy(ret_buf
, buf
, 4 - extra
);
3366 else if (len32
> 0) {
3369 /* Read the first word. */
3373 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
3375 rc
= bnx2_nvram_read_dword(bp
, offset32
, ret_buf
, cmd_flags
);
3377 /* Advance to the next dword. */
3382 while (len32
> 4 && rc
== 0) {
3383 rc
= bnx2_nvram_read_dword(bp
, offset32
, ret_buf
, 0);
3385 /* Advance to the next dword. */
3394 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
3395 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
3397 memcpy(ret_buf
, buf
, 4 - extra
);
3400 /* Disable access to flash interface */
3401 bnx2_disable_nvram_access(bp
);
3403 bnx2_release_nvram_lock(bp
);
3409 bnx2_nvram_write(struct bnx2
*bp
, u32 offset
, u8
*data_buf
,
3412 u32 written
, offset32
, len32
;
3413 u8
*buf
, start
[4], end
[4], *align_buf
= NULL
, *flash_buffer
= NULL
;
3415 int align_start
, align_end
;
3420 align_start
= align_end
= 0;
3422 if ((align_start
= (offset32
& 3))) {
3424 len32
+= align_start
;
3427 if ((rc
= bnx2_nvram_read(bp
, offset32
, start
, 4)))
3432 align_end
= 4 - (len32
& 3);
3434 if ((rc
= bnx2_nvram_read(bp
, offset32
+ len32
- 4, end
, 4)))
3438 if (align_start
|| align_end
) {
3439 align_buf
= kmalloc(len32
, GFP_KERNEL
);
3440 if (align_buf
== NULL
)
3443 memcpy(align_buf
, start
, 4);
3446 memcpy(align_buf
+ len32
- 4, end
, 4);
3448 memcpy(align_buf
+ align_start
, data_buf
, buf_size
);
3452 if (bp
->flash_info
->buffered
== 0) {
3453 flash_buffer
= kmalloc(264, GFP_KERNEL
);
3454 if (flash_buffer
== NULL
) {
3456 goto nvram_write_end
;
3461 while ((written
< len32
) && (rc
== 0)) {
3462 u32 page_start
, page_end
, data_start
, data_end
;
3463 u32 addr
, cmd_flags
;
3466 /* Find the page_start addr */
3467 page_start
= offset32
+ written
;
3468 page_start
-= (page_start
% bp
->flash_info
->page_size
);
3469 /* Find the page_end addr */
3470 page_end
= page_start
+ bp
->flash_info
->page_size
;
3471 /* Find the data_start addr */
3472 data_start
= (written
== 0) ? offset32
: page_start
;
3473 /* Find the data_end addr */
3474 data_end
= (page_end
> offset32
+ len32
) ?
3475 (offset32
+ len32
) : page_end
;
3477 /* Request access to the flash interface. */
3478 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
3479 goto nvram_write_end
;
3481 /* Enable access to flash interface */
3482 bnx2_enable_nvram_access(bp
);
3484 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
3485 if (bp
->flash_info
->buffered
== 0) {
3488 /* Read the whole page into the buffer
3489 * (non-buffer flash only) */
3490 for (j
= 0; j
< bp
->flash_info
->page_size
; j
+= 4) {
3491 if (j
== (bp
->flash_info
->page_size
- 4)) {
3492 cmd_flags
|= BNX2_NVM_COMMAND_LAST
;
3494 rc
= bnx2_nvram_read_dword(bp
,
3500 goto nvram_write_end
;
3506 /* Enable writes to flash interface (unlock write-protect) */
3507 if ((rc
= bnx2_enable_nvram_write(bp
)) != 0)
3508 goto nvram_write_end
;
3510 /* Loop to write back the buffer data from page_start to
3513 if (bp
->flash_info
->buffered
== 0) {
3514 /* Erase the page */
3515 if ((rc
= bnx2_nvram_erase_page(bp
, page_start
)) != 0)
3516 goto nvram_write_end
;
3518 /* Re-enable the write again for the actual write */
3519 bnx2_enable_nvram_write(bp
);
3521 for (addr
= page_start
; addr
< data_start
;
3522 addr
+= 4, i
+= 4) {
3524 rc
= bnx2_nvram_write_dword(bp
, addr
,
3525 &flash_buffer
[i
], cmd_flags
);
3528 goto nvram_write_end
;
3534 /* Loop to write the new data from data_start to data_end */
3535 for (addr
= data_start
; addr
< data_end
; addr
+= 4, i
+= 4) {
3536 if ((addr
== page_end
- 4) ||
3537 ((bp
->flash_info
->buffered
) &&
3538 (addr
== data_end
- 4))) {
3540 cmd_flags
|= BNX2_NVM_COMMAND_LAST
;
3542 rc
= bnx2_nvram_write_dword(bp
, addr
, buf
,
3546 goto nvram_write_end
;
3552 /* Loop to write back the buffer data from data_end
3554 if (bp
->flash_info
->buffered
== 0) {
3555 for (addr
= data_end
; addr
< page_end
;
3556 addr
+= 4, i
+= 4) {
3558 if (addr
== page_end
-4) {
3559 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
3561 rc
= bnx2_nvram_write_dword(bp
, addr
,
3562 &flash_buffer
[i
], cmd_flags
);
3565 goto nvram_write_end
;
3571 /* Disable writes to flash interface (lock write-protect) */
3572 bnx2_disable_nvram_write(bp
);
3574 /* Disable access to flash interface */
3575 bnx2_disable_nvram_access(bp
);
3576 bnx2_release_nvram_lock(bp
);
3578 /* Increment written */
3579 written
+= data_end
- data_start
;
3583 kfree(flash_buffer
);
3589 bnx2_reset_chip(struct bnx2
*bp
, u32 reset_code
)
3594 /* Wait for the current PCI transaction to complete before
3595 * issuing a reset. */
3596 REG_WR(bp
, BNX2_MISC_ENABLE_CLR_BITS
,
3597 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE
|
3598 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE
|
3599 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE
|
3600 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE
);
3601 val
= REG_RD(bp
, BNX2_MISC_ENABLE_CLR_BITS
);
3604 /* Wait for the firmware to tell us it is ok to issue a reset. */
3605 bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT0
| reset_code
, 1);
3607 /* Deposit a driver reset signature so the firmware knows that
3608 * this is a soft reset. */
3609 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_DRV_RESET_SIGNATURE
,
3610 BNX2_DRV_RESET_SIGNATURE_MAGIC
);
3612 /* Do a dummy read to force the chip to complete all current transaction
3613 * before we issue a reset. */
3614 val
= REG_RD(bp
, BNX2_MISC_ID
);
3616 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
3617 REG_WR(bp
, BNX2_MISC_COMMAND
, BNX2_MISC_COMMAND_SW_RESET
);
3618 REG_RD(bp
, BNX2_MISC_COMMAND
);
3621 val
= BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA
|
3622 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP
;
3624 pci_write_config_dword(bp
->pdev
, BNX2_PCICFG_MISC_CONFIG
, val
);
3627 val
= BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
3628 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA
|
3629 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP
;
3632 REG_WR(bp
, BNX2_PCICFG_MISC_CONFIG
, val
);
3634 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
3635 (CHIP_ID(bp
) == CHIP_ID_5706_A1
)) {
3636 current
->state
= TASK_UNINTERRUPTIBLE
;
3637 schedule_timeout(HZ
/ 50);
3640 /* Reset takes approximate 30 usec */
3641 for (i
= 0; i
< 10; i
++) {
3642 val
= REG_RD(bp
, BNX2_PCICFG_MISC_CONFIG
);
3643 if ((val
& (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
3644 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY
)) == 0)
3649 if (val
& (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
3650 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY
)) {
3651 printk(KERN_ERR PFX
"Chip reset did not complete\n");
3656 /* Make sure byte swapping is properly configured. */
3657 val
= REG_RD(bp
, BNX2_PCI_SWAP_DIAG0
);
3658 if (val
!= 0x01020304) {
3659 printk(KERN_ERR PFX
"Chip not in correct endian mode\n");
3663 /* Wait for the firmware to finish its initialization. */
3664 rc
= bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT1
| reset_code
, 0);
3668 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
3669 /* Adjust the voltage regular to two steps lower. The default
3670 * of this register is 0x0000000e. */
3671 REG_WR(bp
, BNX2_MISC_VREG_CONTROL
, 0x000000fa);
3673 /* Remove bad rbuf memory from the free pool. */
3674 rc
= bnx2_alloc_bad_rbuf(bp
);
3681 bnx2_init_chip(struct bnx2
*bp
)
3686 /* Make sure the interrupt is not active. */
3687 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
, BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
3689 val
= BNX2_DMA_CONFIG_DATA_BYTE_SWAP
|
3690 BNX2_DMA_CONFIG_DATA_WORD_SWAP
|
3692 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP
|
3694 BNX2_DMA_CONFIG_CNTL_WORD_SWAP
|
3695 DMA_READ_CHANS
<< 12 |
3696 DMA_WRITE_CHANS
<< 16;
3698 val
|= (0x2 << 20) | (1 << 11);
3700 if ((bp
->flags
& PCIX_FLAG
) && (bp
->bus_speed_mhz
== 133))
3703 if ((CHIP_NUM(bp
) == CHIP_NUM_5706
) &&
3704 (CHIP_ID(bp
) != CHIP_ID_5706_A0
) && !(bp
->flags
& PCIX_FLAG
))
3705 val
|= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA
;
3707 REG_WR(bp
, BNX2_DMA_CONFIG
, val
);
3709 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
3710 val
= REG_RD(bp
, BNX2_TDMA_CONFIG
);
3711 val
|= BNX2_TDMA_CONFIG_ONE_DMA
;
3712 REG_WR(bp
, BNX2_TDMA_CONFIG
, val
);
3715 if (bp
->flags
& PCIX_FLAG
) {
3718 pci_read_config_word(bp
->pdev
, bp
->pcix_cap
+ PCI_X_CMD
,
3720 pci_write_config_word(bp
->pdev
, bp
->pcix_cap
+ PCI_X_CMD
,
3721 val16
& ~PCI_X_CMD_ERO
);
3724 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
3725 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE
|
3726 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE
|
3727 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE
);
3729 /* Initialize context mapping and zero out the quick contexts. The
3730 * context block must have already been enabled. */
3731 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
3732 rc
= bnx2_init_5709_context(bp
);
3736 bnx2_init_context(bp
);
3738 if ((rc
= bnx2_init_cpus(bp
)) != 0)
3741 bnx2_init_nvram(bp
);
3743 bnx2_set_mac_addr(bp
);
3745 val
= REG_RD(bp
, BNX2_MQ_CONFIG
);
3746 val
&= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE
;
3747 val
|= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256
;
3748 if (CHIP_ID(bp
) == CHIP_ID_5709_A0
|| CHIP_ID(bp
) == CHIP_ID_5709_A1
)
3749 val
|= BNX2_MQ_CONFIG_HALT_DIS
;
3751 REG_WR(bp
, BNX2_MQ_CONFIG
, val
);
3753 val
= 0x10000 + (MAX_CID_CNT
* MB_KERNEL_CTX_SIZE
);
3754 REG_WR(bp
, BNX2_MQ_KNL_BYP_WIND_START
, val
);
3755 REG_WR(bp
, BNX2_MQ_KNL_WIND_END
, val
);
3757 val
= (BCM_PAGE_BITS
- 8) << 24;
3758 REG_WR(bp
, BNX2_RV2P_CONFIG
, val
);
3760 /* Configure page size. */
3761 val
= REG_RD(bp
, BNX2_TBDR_CONFIG
);
3762 val
&= ~BNX2_TBDR_CONFIG_PAGE_SIZE
;
3763 val
|= (BCM_PAGE_BITS
- 8) << 24 | 0x40;
3764 REG_WR(bp
, BNX2_TBDR_CONFIG
, val
);
3766 val
= bp
->mac_addr
[0] +
3767 (bp
->mac_addr
[1] << 8) +
3768 (bp
->mac_addr
[2] << 16) +
3770 (bp
->mac_addr
[4] << 8) +
3771 (bp
->mac_addr
[5] << 16);
3772 REG_WR(bp
, BNX2_EMAC_BACKOFF_SEED
, val
);
3774 /* Program the MTU. Also include 4 bytes for CRC32. */
3775 val
= bp
->dev
->mtu
+ ETH_HLEN
+ 4;
3776 if (val
> (MAX_ETHERNET_PACKET_SIZE
+ 4))
3777 val
|= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA
;
3778 REG_WR(bp
, BNX2_EMAC_RX_MTU_SIZE
, val
);
3780 bp
->last_status_idx
= 0;
3781 bp
->rx_mode
= BNX2_EMAC_RX_MODE_SORT_MODE
;
3783 /* Set up how to generate a link change interrupt. */
3784 REG_WR(bp
, BNX2_EMAC_ATTENTION_ENA
, BNX2_EMAC_ATTENTION_ENA_LINK
);
3786 REG_WR(bp
, BNX2_HC_STATUS_ADDR_L
,
3787 (u64
) bp
->status_blk_mapping
& 0xffffffff);
3788 REG_WR(bp
, BNX2_HC_STATUS_ADDR_H
, (u64
) bp
->status_blk_mapping
>> 32);
3790 REG_WR(bp
, BNX2_HC_STATISTICS_ADDR_L
,
3791 (u64
) bp
->stats_blk_mapping
& 0xffffffff);
3792 REG_WR(bp
, BNX2_HC_STATISTICS_ADDR_H
,
3793 (u64
) bp
->stats_blk_mapping
>> 32);
3795 REG_WR(bp
, BNX2_HC_TX_QUICK_CONS_TRIP
,
3796 (bp
->tx_quick_cons_trip_int
<< 16) | bp
->tx_quick_cons_trip
);
3798 REG_WR(bp
, BNX2_HC_RX_QUICK_CONS_TRIP
,
3799 (bp
->rx_quick_cons_trip_int
<< 16) | bp
->rx_quick_cons_trip
);
3801 REG_WR(bp
, BNX2_HC_COMP_PROD_TRIP
,
3802 (bp
->comp_prod_trip_int
<< 16) | bp
->comp_prod_trip
);
3804 REG_WR(bp
, BNX2_HC_TX_TICKS
, (bp
->tx_ticks_int
<< 16) | bp
->tx_ticks
);
3806 REG_WR(bp
, BNX2_HC_RX_TICKS
, (bp
->rx_ticks_int
<< 16) | bp
->rx_ticks
);
3808 REG_WR(bp
, BNX2_HC_COM_TICKS
,
3809 (bp
->com_ticks_int
<< 16) | bp
->com_ticks
);
3811 REG_WR(bp
, BNX2_HC_CMD_TICKS
,
3812 (bp
->cmd_ticks_int
<< 16) | bp
->cmd_ticks
);
3814 if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
3815 REG_WR(bp
, BNX2_HC_STATS_TICKS
, 0);
3817 REG_WR(bp
, BNX2_HC_STATS_TICKS
, bp
->stats_ticks
& 0xffff00);
3818 REG_WR(bp
, BNX2_HC_STAT_COLLECT_TICKS
, 0xbb8); /* 3ms */
3820 if (CHIP_ID(bp
) == CHIP_ID_5706_A1
)
3821 val
= BNX2_HC_CONFIG_COLLECT_STATS
;
3823 val
= BNX2_HC_CONFIG_RX_TMR_MODE
| BNX2_HC_CONFIG_TX_TMR_MODE
|
3824 BNX2_HC_CONFIG_COLLECT_STATS
;
3827 if (bp
->flags
& ONE_SHOT_MSI_FLAG
)
3828 val
|= BNX2_HC_CONFIG_ONE_SHOT
;
3830 REG_WR(bp
, BNX2_HC_CONFIG
, val
);
3832 /* Clear internal stats counters. */
3833 REG_WR(bp
, BNX2_HC_COMMAND
, BNX2_HC_COMMAND_CLR_STAT_NOW
);
3835 REG_WR(bp
, BNX2_HC_ATTN_BITS_ENABLE
, STATUS_ATTN_EVENTS
);
3837 if (REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_FEATURE
) &
3838 BNX2_PORT_FEATURE_ASF_ENABLED
)
3839 bp
->flags
|= ASF_ENABLE_FLAG
;
3841 /* Initialize the receive filter. */
3842 bnx2_set_rx_mode(bp
->dev
);
3844 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
3845 val
= REG_RD(bp
, BNX2_MISC_NEW_CORE_CTL
);
3846 val
|= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE
;
3847 REG_WR(bp
, BNX2_MISC_NEW_CORE_CTL
, val
);
3849 rc
= bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT2
| BNX2_DRV_MSG_CODE_RESET
,
3852 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
, 0x5ffffff);
3853 REG_RD(bp
, BNX2_MISC_ENABLE_SET_BITS
);
3857 bp
->hc_cmd
= REG_RD(bp
, BNX2_HC_COMMAND
);
3863 bnx2_init_tx_context(struct bnx2
*bp
, u32 cid
)
3865 u32 val
, offset0
, offset1
, offset2
, offset3
;
3867 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
3868 offset0
= BNX2_L2CTX_TYPE_XI
;
3869 offset1
= BNX2_L2CTX_CMD_TYPE_XI
;
3870 offset2
= BNX2_L2CTX_TBDR_BHADDR_HI_XI
;
3871 offset3
= BNX2_L2CTX_TBDR_BHADDR_LO_XI
;
3873 offset0
= BNX2_L2CTX_TYPE
;
3874 offset1
= BNX2_L2CTX_CMD_TYPE
;
3875 offset2
= BNX2_L2CTX_TBDR_BHADDR_HI
;
3876 offset3
= BNX2_L2CTX_TBDR_BHADDR_LO
;
3878 val
= BNX2_L2CTX_TYPE_TYPE_L2
| BNX2_L2CTX_TYPE_SIZE_L2
;
3879 CTX_WR(bp
, GET_CID_ADDR(cid
), offset0
, val
);
3881 val
= BNX2_L2CTX_CMD_TYPE_TYPE_L2
| (8 << 16);
3882 CTX_WR(bp
, GET_CID_ADDR(cid
), offset1
, val
);
3884 val
= (u64
) bp
->tx_desc_mapping
>> 32;
3885 CTX_WR(bp
, GET_CID_ADDR(cid
), offset2
, val
);
3887 val
= (u64
) bp
->tx_desc_mapping
& 0xffffffff;
3888 CTX_WR(bp
, GET_CID_ADDR(cid
), offset3
, val
);
3892 bnx2_init_tx_ring(struct bnx2
*bp
)
3897 bp
->tx_wake_thresh
= bp
->tx_ring_size
/ 2;
3899 txbd
= &bp
->tx_desc_ring
[MAX_TX_DESC_CNT
];
3901 txbd
->tx_bd_haddr_hi
= (u64
) bp
->tx_desc_mapping
>> 32;
3902 txbd
->tx_bd_haddr_lo
= (u64
) bp
->tx_desc_mapping
& 0xffffffff;
3907 bp
->tx_prod_bseq
= 0;
3910 bp
->tx_bidx_addr
= MB_GET_CID_ADDR(cid
) + BNX2_L2CTX_TX_HOST_BIDX
;
3911 bp
->tx_bseq_addr
= MB_GET_CID_ADDR(cid
) + BNX2_L2CTX_TX_HOST_BSEQ
;
3913 bnx2_init_tx_context(bp
, cid
);
3917 bnx2_init_rx_ring(struct bnx2
*bp
)
3921 u16 prod
, ring_prod
;
3924 /* 8 for CRC and VLAN */
3925 bp
->rx_buf_use_size
= bp
->dev
->mtu
+ ETH_HLEN
+ bp
->rx_offset
+ 8;
3927 bp
->rx_buf_size
= bp
->rx_buf_use_size
+ BNX2_RX_ALIGN
;
3929 ring_prod
= prod
= bp
->rx_prod
= 0;
3932 bp
->rx_prod_bseq
= 0;
3934 for (i
= 0; i
< bp
->rx_max_ring
; i
++) {
3937 rxbd
= &bp
->rx_desc_ring
[i
][0];
3938 for (j
= 0; j
< MAX_RX_DESC_CNT
; j
++, rxbd
++) {
3939 rxbd
->rx_bd_len
= bp
->rx_buf_use_size
;
3940 rxbd
->rx_bd_flags
= RX_BD_FLAGS_START
| RX_BD_FLAGS_END
;
3942 if (i
== (bp
->rx_max_ring
- 1))
3946 rxbd
->rx_bd_haddr_hi
= (u64
) bp
->rx_desc_mapping
[j
] >> 32;
3947 rxbd
->rx_bd_haddr_lo
= (u64
) bp
->rx_desc_mapping
[j
] &
3951 val
= BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE
;
3952 val
|= BNX2_L2CTX_CTX_TYPE_SIZE_L2
;
3954 CTX_WR(bp
, GET_CID_ADDR(RX_CID
), BNX2_L2CTX_CTX_TYPE
, val
);
3956 val
= (u64
) bp
->rx_desc_mapping
[0] >> 32;
3957 CTX_WR(bp
, GET_CID_ADDR(RX_CID
), BNX2_L2CTX_NX_BDHADDR_HI
, val
);
3959 val
= (u64
) bp
->rx_desc_mapping
[0] & 0xffffffff;
3960 CTX_WR(bp
, GET_CID_ADDR(RX_CID
), BNX2_L2CTX_NX_BDHADDR_LO
, val
);
3962 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
3963 if (bnx2_alloc_rx_skb(bp
, ring_prod
) < 0) {
3966 prod
= NEXT_RX_BD(prod
);
3967 ring_prod
= RX_RING_IDX(prod
);
3971 REG_WR16(bp
, MB_RX_CID_ADDR
+ BNX2_L2CTX_HOST_BDIDX
, prod
);
3973 REG_WR(bp
, MB_RX_CID_ADDR
+ BNX2_L2CTX_HOST_BSEQ
, bp
->rx_prod_bseq
);
3977 bnx2_set_rx_ring_size(struct bnx2
*bp
, u32 size
)
3981 bp
->rx_ring_size
= size
;
3983 while (size
> MAX_RX_DESC_CNT
) {
3984 size
-= MAX_RX_DESC_CNT
;
3987 /* round to next power of 2 */
3989 while ((max
& num_rings
) == 0)
3992 if (num_rings
!= max
)
3995 bp
->rx_max_ring
= max
;
3996 bp
->rx_max_ring_idx
= (bp
->rx_max_ring
* RX_DESC_CNT
) - 1;
4000 bnx2_free_tx_skbs(struct bnx2
*bp
)
4004 if (bp
->tx_buf_ring
== NULL
)
4007 for (i
= 0; i
< TX_DESC_CNT
; ) {
4008 struct sw_bd
*tx_buf
= &bp
->tx_buf_ring
[i
];
4009 struct sk_buff
*skb
= tx_buf
->skb
;
4017 pci_unmap_single(bp
->pdev
, pci_unmap_addr(tx_buf
, mapping
),
4018 skb_headlen(skb
), PCI_DMA_TODEVICE
);
4022 last
= skb_shinfo(skb
)->nr_frags
;
4023 for (j
= 0; j
< last
; j
++) {
4024 tx_buf
= &bp
->tx_buf_ring
[i
+ j
+ 1];
4025 pci_unmap_page(bp
->pdev
,
4026 pci_unmap_addr(tx_buf
, mapping
),
4027 skb_shinfo(skb
)->frags
[j
].size
,
4037 bnx2_free_rx_skbs(struct bnx2
*bp
)
4041 if (bp
->rx_buf_ring
== NULL
)
4044 for (i
= 0; i
< bp
->rx_max_ring_idx
; i
++) {
4045 struct sw_bd
*rx_buf
= &bp
->rx_buf_ring
[i
];
4046 struct sk_buff
*skb
= rx_buf
->skb
;
4051 pci_unmap_single(bp
->pdev
, pci_unmap_addr(rx_buf
, mapping
),
4052 bp
->rx_buf_use_size
, PCI_DMA_FROMDEVICE
);
/* Free all outstanding TX and RX skbs. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4068 bnx2_reset_nic(struct bnx2
*bp
, u32 reset_code
)
4072 rc
= bnx2_reset_chip(bp
, reset_code
);
4077 if ((rc
= bnx2_init_chip(bp
)) != 0)
4080 bnx2_init_tx_ring(bp
);
4081 bnx2_init_rx_ring(bp
);
4086 bnx2_init_nic(struct bnx2
*bp
)
4090 if ((rc
= bnx2_reset_nic(bp
, BNX2_DRV_MSG_CODE_RESET
)) != 0)
4093 spin_lock_bh(&bp
->phy_lock
);
4095 spin_unlock_bh(&bp
->phy_lock
);
4101 bnx2_test_registers(struct bnx2
*bp
)
4105 static const struct {
4108 #define BNX2_FL_NOT_5709 1
4112 { 0x006c, 0, 0x00000000, 0x0000003f },
4113 { 0x0090, 0, 0xffffffff, 0x00000000 },
4114 { 0x0094, 0, 0x00000000, 0x00000000 },
4116 { 0x0404, BNX2_FL_NOT_5709
, 0x00003f00, 0x00000000 },
4117 { 0x0418, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
4118 { 0x041c, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
4119 { 0x0420, BNX2_FL_NOT_5709
, 0x00000000, 0x80ffffff },
4120 { 0x0424, BNX2_FL_NOT_5709
, 0x00000000, 0x00000000 },
4121 { 0x0428, BNX2_FL_NOT_5709
, 0x00000000, 0x00000001 },
4122 { 0x0450, BNX2_FL_NOT_5709
, 0x00000000, 0x0000ffff },
4123 { 0x0454, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
4124 { 0x0458, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
4126 { 0x0808, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
4127 { 0x0854, BNX2_FL_NOT_5709
, 0x00000000, 0xffffffff },
4128 { 0x0868, BNX2_FL_NOT_5709
, 0x00000000, 0x77777777 },
4129 { 0x086c, BNX2_FL_NOT_5709
, 0x00000000, 0x77777777 },
4130 { 0x0870, BNX2_FL_NOT_5709
, 0x00000000, 0x77777777 },
4131 { 0x0874, BNX2_FL_NOT_5709
, 0x00000000, 0x77777777 },
4133 { 0x0c00, BNX2_FL_NOT_5709
, 0x00000000, 0x00000001 },
4134 { 0x0c04, BNX2_FL_NOT_5709
, 0x00000000, 0x03ff0001 },
4135 { 0x0c08, BNX2_FL_NOT_5709
, 0x0f0ff073, 0x00000000 },
4137 { 0x1000, 0, 0x00000000, 0x00000001 },
4138 { 0x1004, 0, 0x00000000, 0x000f0001 },
4140 { 0x1408, 0, 0x01c00800, 0x00000000 },
4141 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4142 { 0x14a8, 0, 0x00000000, 0x000001ff },
4143 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4144 { 0x14b0, 0, 0x00000002, 0x00000001 },
4145 { 0x14b8, 0, 0x00000000, 0x00000000 },
4146 { 0x14c0, 0, 0x00000000, 0x00000009 },
4147 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4148 { 0x14cc, 0, 0x00000000, 0x00000001 },
4149 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4151 { 0x1800, 0, 0x00000000, 0x00000001 },
4152 { 0x1804, 0, 0x00000000, 0x00000003 },
4154 { 0x2800, 0, 0x00000000, 0x00000001 },
4155 { 0x2804, 0, 0x00000000, 0x00003f01 },
4156 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4157 { 0x2810, 0, 0xffff0000, 0x00000000 },
4158 { 0x2814, 0, 0xffff0000, 0x00000000 },
4159 { 0x2818, 0, 0xffff0000, 0x00000000 },
4160 { 0x281c, 0, 0xffff0000, 0x00000000 },
4161 { 0x2834, 0, 0xffffffff, 0x00000000 },
4162 { 0x2840, 0, 0x00000000, 0xffffffff },
4163 { 0x2844, 0, 0x00000000, 0xffffffff },
4164 { 0x2848, 0, 0xffffffff, 0x00000000 },
4165 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4167 { 0x2c00, 0, 0x00000000, 0x00000011 },
4168 { 0x2c04, 0, 0x00000000, 0x00030007 },
4170 { 0x3c00, 0, 0x00000000, 0x00000001 },
4171 { 0x3c04, 0, 0x00000000, 0x00070000 },
4172 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4173 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4174 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4175 { 0x3c14, 0, 0x00000000, 0xffffffff },
4176 { 0x3c18, 0, 0x00000000, 0xffffffff },
4177 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4178 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4180 { 0x5004, 0, 0x00000000, 0x0000007f },
4181 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4183 { 0x5c00, 0, 0x00000000, 0x00000001 },
4184 { 0x5c04, 0, 0x00000000, 0x0003000f },
4185 { 0x5c08, 0, 0x00000003, 0x00000000 },
4186 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4187 { 0x5c10, 0, 0x00000000, 0xffffffff },
4188 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4189 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4190 { 0x5c88, 0, 0x00000000, 0x00077373 },
4191 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4193 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4194 { 0x680c, 0, 0xffffffff, 0x00000000 },
4195 { 0x6810, 0, 0xffffffff, 0x00000000 },
4196 { 0x6814, 0, 0xffffffff, 0x00000000 },
4197 { 0x6818, 0, 0xffffffff, 0x00000000 },
4198 { 0x681c, 0, 0xffffffff, 0x00000000 },
4199 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4200 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4201 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4202 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4203 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4204 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4205 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4206 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4207 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4208 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4209 { 0x684c, 0, 0xffffffff, 0x00000000 },
4210 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4211 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4212 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4213 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4214 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4215 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4217 { 0xffff, 0, 0x00000000, 0x00000000 },
4222 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
4225 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
4226 u32 offset
, rw_mask
, ro_mask
, save_val
, val
;
4227 u16 flags
= reg_tbl
[i
].flags
;
4229 if (is_5709
&& (flags
& BNX2_FL_NOT_5709
))
4232 offset
= (u32
) reg_tbl
[i
].offset
;
4233 rw_mask
= reg_tbl
[i
].rw_mask
;
4234 ro_mask
= reg_tbl
[i
].ro_mask
;
4236 save_val
= readl(bp
->regview
+ offset
);
4238 writel(0, bp
->regview
+ offset
);
4240 val
= readl(bp
->regview
+ offset
);
4241 if ((val
& rw_mask
) != 0) {
4245 if ((val
& ro_mask
) != (save_val
& ro_mask
)) {
4249 writel(0xffffffff, bp
->regview
+ offset
);
4251 val
= readl(bp
->regview
+ offset
);
4252 if ((val
& rw_mask
) != rw_mask
) {
4256 if ((val
& ro_mask
) != (save_val
& ro_mask
)) {
4260 writel(save_val
, bp
->regview
+ offset
);
4264 writel(save_val
, bp
->regview
+ offset
);
4272 bnx2_do_mem_test(struct bnx2
*bp
, u32 start
, u32 size
)
4274 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0x55555555,
4275 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4278 for (i
= 0; i
< sizeof(test_pattern
) / 4; i
++) {
4281 for (offset
= 0; offset
< size
; offset
+= 4) {
4283 REG_WR_IND(bp
, start
+ offset
, test_pattern
[i
]);
4285 if (REG_RD_IND(bp
, start
+ offset
) !=
4295 bnx2_test_memory(struct bnx2
*bp
)
4299 static struct mem_entry
{
4302 } mem_tbl_5706
[] = {
4303 { 0x60000, 0x4000 },
4304 { 0xa0000, 0x3000 },
4305 { 0xe0000, 0x4000 },
4306 { 0x120000, 0x4000 },
4307 { 0x1a0000, 0x4000 },
4308 { 0x160000, 0x4000 },
4312 { 0x60000, 0x4000 },
4313 { 0xa0000, 0x3000 },
4314 { 0xe0000, 0x4000 },
4315 { 0x120000, 0x4000 },
4316 { 0x1a0000, 0x4000 },
4319 struct mem_entry
*mem_tbl
;
4321 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
4322 mem_tbl
= mem_tbl_5709
;
4324 mem_tbl
= mem_tbl_5706
;
4326 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
4327 if ((ret
= bnx2_do_mem_test(bp
, mem_tbl
[i
].offset
,
4328 mem_tbl
[i
].len
)) != 0) {
4336 #define BNX2_MAC_LOOPBACK 0
4337 #define BNX2_PHY_LOOPBACK 1
4340 bnx2_run_loopback(struct bnx2
*bp
, int loopback_mode
)
4342 unsigned int pkt_size
, num_pkts
, i
;
4343 struct sk_buff
*skb
, *rx_skb
;
4344 unsigned char *packet
;
4345 u16 rx_start_idx
, rx_idx
;
4348 struct sw_bd
*rx_buf
;
4349 struct l2_fhdr
*rx_hdr
;
4352 if (loopback_mode
== BNX2_MAC_LOOPBACK
) {
4353 bp
->loopback
= MAC_LOOPBACK
;
4354 bnx2_set_mac_loopback(bp
);
4356 else if (loopback_mode
== BNX2_PHY_LOOPBACK
) {
4357 bp
->loopback
= PHY_LOOPBACK
;
4358 bnx2_set_phy_loopback(bp
);
4364 skb
= netdev_alloc_skb(bp
->dev
, pkt_size
);
4367 packet
= skb_put(skb
, pkt_size
);
4368 memcpy(packet
, bp
->dev
->dev_addr
, 6);
4369 memset(packet
+ 6, 0x0, 8);
4370 for (i
= 14; i
< pkt_size
; i
++)
4371 packet
[i
] = (unsigned char) (i
& 0xff);
4373 map
= pci_map_single(bp
->pdev
, skb
->data
, pkt_size
,
4376 REG_WR(bp
, BNX2_HC_COMMAND
,
4377 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
4379 REG_RD(bp
, BNX2_HC_COMMAND
);
4382 rx_start_idx
= bp
->status_blk
->status_rx_quick_consumer_index0
;
4386 txbd
= &bp
->tx_desc_ring
[TX_RING_IDX(bp
->tx_prod
)];
4388 txbd
->tx_bd_haddr_hi
= (u64
) map
>> 32;
4389 txbd
->tx_bd_haddr_lo
= (u64
) map
& 0xffffffff;
4390 txbd
->tx_bd_mss_nbytes
= pkt_size
;
4391 txbd
->tx_bd_vlan_tag_flags
= TX_BD_FLAGS_START
| TX_BD_FLAGS_END
;
4394 bp
->tx_prod
= NEXT_TX_BD(bp
->tx_prod
);
4395 bp
->tx_prod_bseq
+= pkt_size
;
4397 REG_WR16(bp
, bp
->tx_bidx_addr
, bp
->tx_prod
);
4398 REG_WR(bp
, bp
->tx_bseq_addr
, bp
->tx_prod_bseq
);
4402 REG_WR(bp
, BNX2_HC_COMMAND
,
4403 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
4405 REG_RD(bp
, BNX2_HC_COMMAND
);
4409 pci_unmap_single(bp
->pdev
, map
, pkt_size
, PCI_DMA_TODEVICE
);
4412 if (bp
->status_blk
->status_tx_quick_consumer_index0
!= bp
->tx_prod
) {
4413 goto loopback_test_done
;
4416 rx_idx
= bp
->status_blk
->status_rx_quick_consumer_index0
;
4417 if (rx_idx
!= rx_start_idx
+ num_pkts
) {
4418 goto loopback_test_done
;
4421 rx_buf
= &bp
->rx_buf_ring
[rx_start_idx
];
4422 rx_skb
= rx_buf
->skb
;
4424 rx_hdr
= (struct l2_fhdr
*) rx_skb
->data
;
4425 skb_reserve(rx_skb
, bp
->rx_offset
);
4427 pci_dma_sync_single_for_cpu(bp
->pdev
,
4428 pci_unmap_addr(rx_buf
, mapping
),
4429 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
4431 if (rx_hdr
->l2_fhdr_status
&
4432 (L2_FHDR_ERRORS_BAD_CRC
|
4433 L2_FHDR_ERRORS_PHY_DECODE
|
4434 L2_FHDR_ERRORS_ALIGNMENT
|
4435 L2_FHDR_ERRORS_TOO_SHORT
|
4436 L2_FHDR_ERRORS_GIANT_FRAME
)) {
4438 goto loopback_test_done
;
4441 if ((rx_hdr
->l2_fhdr_pkt_len
- 4) != pkt_size
) {
4442 goto loopback_test_done
;
4445 for (i
= 14; i
< pkt_size
; i
++) {
4446 if (*(rx_skb
->data
+ i
) != (unsigned char) (i
& 0xff)) {
4447 goto loopback_test_done
;
4458 #define BNX2_MAC_LOOPBACK_FAILED 1
4459 #define BNX2_PHY_LOOPBACK_FAILED 2
4460 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4461 BNX2_PHY_LOOPBACK_FAILED)
4464 bnx2_test_loopback(struct bnx2
*bp
)
4468 if (!netif_running(bp
->dev
))
4469 return BNX2_LOOPBACK_FAILED
;
4471 bnx2_reset_nic(bp
, BNX2_DRV_MSG_CODE_RESET
);
4472 spin_lock_bh(&bp
->phy_lock
);
4474 spin_unlock_bh(&bp
->phy_lock
);
4475 if (bnx2_run_loopback(bp
, BNX2_MAC_LOOPBACK
))
4476 rc
|= BNX2_MAC_LOOPBACK_FAILED
;
4477 if (bnx2_run_loopback(bp
, BNX2_PHY_LOOPBACK
))
4478 rc
|= BNX2_PHY_LOOPBACK_FAILED
;
4482 #define NVRAM_SIZE 0x200
4483 #define CRC32_RESIDUAL 0xdebb20e3
4486 bnx2_test_nvram(struct bnx2
*bp
)
4488 u32 buf
[NVRAM_SIZE
/ 4];
4489 u8
*data
= (u8
*) buf
;
4493 if ((rc
= bnx2_nvram_read(bp
, 0, data
, 4)) != 0)
4494 goto test_nvram_done
;
4496 magic
= be32_to_cpu(buf
[0]);
4497 if (magic
!= 0x669955aa) {
4499 goto test_nvram_done
;
4502 if ((rc
= bnx2_nvram_read(bp
, 0x100, data
, NVRAM_SIZE
)) != 0)
4503 goto test_nvram_done
;
4505 csum
= ether_crc_le(0x100, data
);
4506 if (csum
!= CRC32_RESIDUAL
) {
4508 goto test_nvram_done
;
4511 csum
= ether_crc_le(0x100, data
+ 0x100);
4512 if (csum
!= CRC32_RESIDUAL
) {
4521 bnx2_test_link(struct bnx2
*bp
)
4525 spin_lock_bh(&bp
->phy_lock
);
4526 bnx2_enable_bmsr1(bp
);
4527 bnx2_read_phy(bp
, bp
->mii_bmsr1
, &bmsr
);
4528 bnx2_read_phy(bp
, bp
->mii_bmsr1
, &bmsr
);
4529 bnx2_disable_bmsr1(bp
);
4530 spin_unlock_bh(&bp
->phy_lock
);
4532 if (bmsr
& BMSR_LSTATUS
) {
4539 bnx2_test_intr(struct bnx2
*bp
)
4544 if (!netif_running(bp
->dev
))
4547 status_idx
= REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
) & 0xffff;
4549 /* This register is not touched during run-time. */
4550 REG_WR(bp
, BNX2_HC_COMMAND
, bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW
);
4551 REG_RD(bp
, BNX2_HC_COMMAND
);
4553 for (i
= 0; i
< 10; i
++) {
4554 if ((REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
) & 0xffff) !=
4560 msleep_interruptible(10);
4569 bnx2_5706_serdes_timer(struct bnx2
*bp
)
4571 spin_lock(&bp
->phy_lock
);
4572 if (bp
->serdes_an_pending
)
4573 bp
->serdes_an_pending
--;
4574 else if ((bp
->link_up
== 0) && (bp
->autoneg
& AUTONEG_SPEED
)) {
4577 bp
->current_interval
= bp
->timer_interval
;
4579 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
4581 if (bmcr
& BMCR_ANENABLE
) {
4584 bnx2_write_phy(bp
, 0x1c, 0x7c00);
4585 bnx2_read_phy(bp
, 0x1c, &phy1
);
4587 bnx2_write_phy(bp
, 0x17, 0x0f01);
4588 bnx2_read_phy(bp
, 0x15, &phy2
);
4589 bnx2_write_phy(bp
, 0x17, 0x0f01);
4590 bnx2_read_phy(bp
, 0x15, &phy2
);
4592 if ((phy1
& 0x10) && /* SIGNAL DETECT */
4593 !(phy2
& 0x20)) { /* no CONFIG */
4595 bmcr
&= ~BMCR_ANENABLE
;
4596 bmcr
|= BMCR_SPEED1000
| BMCR_FULLDPLX
;
4597 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
4598 bp
->phy_flags
|= PHY_PARALLEL_DETECT_FLAG
;
4602 else if ((bp
->link_up
) && (bp
->autoneg
& AUTONEG_SPEED
) &&
4603 (bp
->phy_flags
& PHY_PARALLEL_DETECT_FLAG
)) {
4606 bnx2_write_phy(bp
, 0x17, 0x0f01);
4607 bnx2_read_phy(bp
, 0x15, &phy2
);
4611 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
4612 bmcr
|= BMCR_ANENABLE
;
4613 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
);
4615 bp
->phy_flags
&= ~PHY_PARALLEL_DETECT_FLAG
;
4618 bp
->current_interval
= bp
->timer_interval
;
4620 spin_unlock(&bp
->phy_lock
);
4624 bnx2_5708_serdes_timer(struct bnx2
*bp
)
4626 if ((bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
) == 0) {
4627 bp
->serdes_an_pending
= 0;
4631 spin_lock(&bp
->phy_lock
);
4632 if (bp
->serdes_an_pending
)
4633 bp
->serdes_an_pending
--;
4634 else if ((bp
->link_up
== 0) && (bp
->autoneg
& AUTONEG_SPEED
)) {
4637 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
4638 if (bmcr
& BMCR_ANENABLE
) {
4639 bnx2_enable_forced_2g5(bp
);
4640 bp
->current_interval
= SERDES_FORCED_TIMEOUT
;
4642 bnx2_disable_forced_2g5(bp
);
4643 bp
->serdes_an_pending
= 2;
4644 bp
->current_interval
= bp
->timer_interval
;
4648 bp
->current_interval
= bp
->timer_interval
;
4650 spin_unlock(&bp
->phy_lock
);
4654 bnx2_timer(unsigned long data
)
4656 struct bnx2
*bp
= (struct bnx2
*) data
;
4659 if (!netif_running(bp
->dev
))
4662 if (atomic_read(&bp
->intr_sem
) != 0)
4663 goto bnx2_restart_timer
;
4665 msg
= (u32
) ++bp
->fw_drv_pulse_wr_seq
;
4666 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_DRV_PULSE_MB
, msg
);
4668 bp
->stats_blk
->stat_FwRxDrop
= REG_RD_IND(bp
, BNX2_FW_RX_DROP_COUNT
);
4670 /* workaround occasional corrupted counters */
4671 if (CHIP_NUM(bp
) == CHIP_NUM_5708
&& bp
->stats_ticks
)
4672 REG_WR(bp
, BNX2_HC_COMMAND
, bp
->hc_cmd
|
4673 BNX2_HC_COMMAND_STATS_NOW
);
4675 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
4676 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
4677 bnx2_5706_serdes_timer(bp
);
4679 bnx2_5708_serdes_timer(bp
);
4683 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
4687 bnx2_request_irq(struct bnx2
*bp
)
4689 struct net_device
*dev
= bp
->dev
;
4692 if (bp
->flags
& USING_MSI_FLAG
) {
4693 irq_handler_t fn
= bnx2_msi
;
4695 if (bp
->flags
& ONE_SHOT_MSI_FLAG
)
4696 fn
= bnx2_msi_1shot
;
4698 rc
= request_irq(bp
->pdev
->irq
, fn
, 0, dev
->name
, dev
);
4700 rc
= request_irq(bp
->pdev
->irq
, bnx2_interrupt
,
4701 IRQF_SHARED
, dev
->name
, dev
);
4706 bnx2_free_irq(struct bnx2
*bp
)
4708 struct net_device
*dev
= bp
->dev
;
4710 if (bp
->flags
& USING_MSI_FLAG
) {
4711 free_irq(bp
->pdev
->irq
, dev
);
4712 pci_disable_msi(bp
->pdev
);
4713 bp
->flags
&= ~(USING_MSI_FLAG
| ONE_SHOT_MSI_FLAG
);
4715 free_irq(bp
->pdev
->irq
, dev
);
4718 /* Called with rtnl_lock */
4720 bnx2_open(struct net_device
*dev
)
4722 struct bnx2
*bp
= netdev_priv(dev
);
4725 netif_carrier_off(dev
);
4727 bnx2_set_power_state(bp
, PCI_D0
);
4728 bnx2_disable_int(bp
);
4730 rc
= bnx2_alloc_mem(bp
);
4734 if ((bp
->flags
& MSI_CAP_FLAG
) && !disable_msi
) {
4735 if (pci_enable_msi(bp
->pdev
) == 0) {
4736 bp
->flags
|= USING_MSI_FLAG
;
4737 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
4738 bp
->flags
|= ONE_SHOT_MSI_FLAG
;
4741 rc
= bnx2_request_irq(bp
);
4748 rc
= bnx2_init_nic(bp
);
4757 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
4759 atomic_set(&bp
->intr_sem
, 0);
4761 bnx2_enable_int(bp
);
4763 if (bp
->flags
& USING_MSI_FLAG
) {
4764 /* Test MSI to make sure it is working
4765 * If MSI test fails, go back to INTx mode
4767 if (bnx2_test_intr(bp
) != 0) {
4768 printk(KERN_WARNING PFX
"%s: No interrupt was generated"
4769 " using MSI, switching to INTx mode. Please"
4770 " report this failure to the PCI maintainer"
4771 " and include system chipset information.\n",
4774 bnx2_disable_int(bp
);
4777 rc
= bnx2_init_nic(bp
);
4780 rc
= bnx2_request_irq(bp
);
4785 del_timer_sync(&bp
->timer
);
4788 bnx2_enable_int(bp
);
4791 if (bp
->flags
& USING_MSI_FLAG
) {
4792 printk(KERN_INFO PFX
"%s: using MSI\n", dev
->name
);
4795 netif_start_queue(dev
);
4801 bnx2_reset_task(struct work_struct
*work
)
4803 struct bnx2
*bp
= container_of(work
, struct bnx2
, reset_task
);
4805 if (!netif_running(bp
->dev
))
4808 bp
->in_reset_task
= 1;
4809 bnx2_netif_stop(bp
);
4813 atomic_set(&bp
->intr_sem
, 1);
4814 bnx2_netif_start(bp
);
4815 bp
->in_reset_task
= 0;
4819 bnx2_tx_timeout(struct net_device
*dev
)
4821 struct bnx2
*bp
= netdev_priv(dev
);
4823 /* This allows the netif to be shutdown gracefully before resetting */
4824 schedule_work(&bp
->reset_task
);
4828 /* Called with rtnl_lock */
4830 bnx2_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*vlgrp
)
4832 struct bnx2
*bp
= netdev_priv(dev
);
4834 bnx2_netif_stop(bp
);
4837 bnx2_set_rx_mode(dev
);
4839 bnx2_netif_start(bp
);
4843 /* Called with netif_tx_lock.
4844 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4845 * netif_wake_queue().
4848 bnx2_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
4850 struct bnx2
*bp
= netdev_priv(dev
);
4853 struct sw_bd
*tx_buf
;
4854 u32 len
, vlan_tag_flags
, last_frag
, mss
;
4855 u16 prod
, ring_prod
;
4858 if (unlikely(bnx2_tx_avail(bp
) < (skb_shinfo(skb
)->nr_frags
+ 1))) {
4859 netif_stop_queue(dev
);
4860 printk(KERN_ERR PFX
"%s: BUG! Tx ring full when queue awake!\n",
4863 return NETDEV_TX_BUSY
;
4865 len
= skb_headlen(skb
);
4867 ring_prod
= TX_RING_IDX(prod
);
4870 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
4871 vlan_tag_flags
|= TX_BD_FLAGS_TCP_UDP_CKSUM
;
4874 if (bp
->vlgrp
!= 0 && vlan_tx_tag_present(skb
)) {
4876 (TX_BD_FLAGS_VLAN_TAG
| (vlan_tx_tag_get(skb
) << 16));
4878 if ((mss
= skb_shinfo(skb
)->gso_size
)) {
4879 u32 tcp_opt_len
, ip_tcp_len
;
4882 vlan_tag_flags
|= TX_BD_FLAGS_SW_LSO
;
4884 tcp_opt_len
= tcp_optlen(skb
);
4886 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
) {
4887 u32 tcp_off
= skb_transport_offset(skb
) -
4888 sizeof(struct ipv6hdr
) - ETH_HLEN
;
4890 vlan_tag_flags
|= ((tcp_opt_len
>> 2) << 8) |
4891 TX_BD_FLAGS_SW_FLAGS
;
4892 if (likely(tcp_off
== 0))
4893 vlan_tag_flags
&= ~TX_BD_FLAGS_TCP6_OFF0_MSK
;
4896 vlan_tag_flags
|= ((tcp_off
& 0x3) <<
4897 TX_BD_FLAGS_TCP6_OFF0_SHL
) |
4898 ((tcp_off
& 0x10) <<
4899 TX_BD_FLAGS_TCP6_OFF4_SHL
);
4900 mss
|= (tcp_off
& 0xc) << TX_BD_TCP6_OFF2_SHL
;
4903 if (skb_header_cloned(skb
) &&
4904 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
4906 return NETDEV_TX_OK
;
4909 ip_tcp_len
= ip_hdrlen(skb
) + sizeof(struct tcphdr
);
4913 iph
->tot_len
= htons(mss
+ ip_tcp_len
+ tcp_opt_len
);
4914 tcp_hdr(skb
)->check
= ~csum_tcpudp_magic(iph
->saddr
,
4918 if (tcp_opt_len
|| (iph
->ihl
> 5)) {
4919 vlan_tag_flags
|= ((iph
->ihl
- 5) +
4920 (tcp_opt_len
>> 2)) << 8;
4926 mapping
= pci_map_single(bp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
4928 tx_buf
= &bp
->tx_buf_ring
[ring_prod
];
4930 pci_unmap_addr_set(tx_buf
, mapping
, mapping
);
4932 txbd
= &bp
->tx_desc_ring
[ring_prod
];
4934 txbd
->tx_bd_haddr_hi
= (u64
) mapping
>> 32;
4935 txbd
->tx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
4936 txbd
->tx_bd_mss_nbytes
= len
| (mss
<< 16);
4937 txbd
->tx_bd_vlan_tag_flags
= vlan_tag_flags
| TX_BD_FLAGS_START
;
4939 last_frag
= skb_shinfo(skb
)->nr_frags
;
4941 for (i
= 0; i
< last_frag
; i
++) {
4942 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4944 prod
= NEXT_TX_BD(prod
);
4945 ring_prod
= TX_RING_IDX(prod
);
4946 txbd
= &bp
->tx_desc_ring
[ring_prod
];
4949 mapping
= pci_map_page(bp
->pdev
, frag
->page
, frag
->page_offset
,
4950 len
, PCI_DMA_TODEVICE
);
4951 pci_unmap_addr_set(&bp
->tx_buf_ring
[ring_prod
],
4954 txbd
->tx_bd_haddr_hi
= (u64
) mapping
>> 32;
4955 txbd
->tx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
4956 txbd
->tx_bd_mss_nbytes
= len
| (mss
<< 16);
4957 txbd
->tx_bd_vlan_tag_flags
= vlan_tag_flags
;
4960 txbd
->tx_bd_vlan_tag_flags
|= TX_BD_FLAGS_END
;
4962 prod
= NEXT_TX_BD(prod
);
4963 bp
->tx_prod_bseq
+= skb
->len
;
4965 REG_WR16(bp
, bp
->tx_bidx_addr
, prod
);
4966 REG_WR(bp
, bp
->tx_bseq_addr
, bp
->tx_prod_bseq
);
4971 dev
->trans_start
= jiffies
;
4973 if (unlikely(bnx2_tx_avail(bp
) <= MAX_SKB_FRAGS
)) {
4974 netif_stop_queue(dev
);
4975 if (bnx2_tx_avail(bp
) > bp
->tx_wake_thresh
)
4976 netif_wake_queue(dev
);
4979 return NETDEV_TX_OK
;
4982 /* Called with rtnl_lock */
4984 bnx2_close(struct net_device
*dev
)
4986 struct bnx2
*bp
= netdev_priv(dev
);
4989 /* Calling flush_scheduled_work() may deadlock because
4990 * linkwatch_event() may be on the workqueue and it will try to get
4991 * the rtnl_lock which we are holding.
4993 while (bp
->in_reset_task
)
4996 bnx2_netif_stop(bp
);
4997 del_timer_sync(&bp
->timer
);
4998 if (bp
->flags
& NO_WOL_FLAG
)
4999 reset_code
= BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN
;
5001 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_WOL
;
5003 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL
;
5004 bnx2_reset_chip(bp
, reset_code
);
5009 netif_carrier_off(bp
->dev
);
5010 bnx2_set_power_state(bp
, PCI_D3hot
);
5014 #define GET_NET_STATS64(ctr) \
5015 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5016 (unsigned long) (ctr##_lo)
5018 #define GET_NET_STATS32(ctr) \
5021 #if (BITS_PER_LONG == 64)
5022 #define GET_NET_STATS GET_NET_STATS64
5024 #define GET_NET_STATS GET_NET_STATS32
5027 static struct net_device_stats
*
5028 bnx2_get_stats(struct net_device
*dev
)
5030 struct bnx2
*bp
= netdev_priv(dev
);
5031 struct statistics_block
*stats_blk
= bp
->stats_blk
;
5032 struct net_device_stats
*net_stats
= &bp
->net_stats
;
5034 if (bp
->stats_blk
== NULL
) {
5037 net_stats
->rx_packets
=
5038 GET_NET_STATS(stats_blk
->stat_IfHCInUcastPkts
) +
5039 GET_NET_STATS(stats_blk
->stat_IfHCInMulticastPkts
) +
5040 GET_NET_STATS(stats_blk
->stat_IfHCInBroadcastPkts
);
5042 net_stats
->tx_packets
=
5043 GET_NET_STATS(stats_blk
->stat_IfHCOutUcastPkts
) +
5044 GET_NET_STATS(stats_blk
->stat_IfHCOutMulticastPkts
) +
5045 GET_NET_STATS(stats_blk
->stat_IfHCOutBroadcastPkts
);
5047 net_stats
->rx_bytes
=
5048 GET_NET_STATS(stats_blk
->stat_IfHCInOctets
);
5050 net_stats
->tx_bytes
=
5051 GET_NET_STATS(stats_blk
->stat_IfHCOutOctets
);
5053 net_stats
->multicast
=
5054 GET_NET_STATS(stats_blk
->stat_IfHCOutMulticastPkts
);
5056 net_stats
->collisions
=
5057 (unsigned long) stats_blk
->stat_EtherStatsCollisions
;
5059 net_stats
->rx_length_errors
=
5060 (unsigned long) (stats_blk
->stat_EtherStatsUndersizePkts
+
5061 stats_blk
->stat_EtherStatsOverrsizePkts
);
5063 net_stats
->rx_over_errors
=
5064 (unsigned long) stats_blk
->stat_IfInMBUFDiscards
;
5066 net_stats
->rx_frame_errors
=
5067 (unsigned long) stats_blk
->stat_Dot3StatsAlignmentErrors
;
5069 net_stats
->rx_crc_errors
=
5070 (unsigned long) stats_blk
->stat_Dot3StatsFCSErrors
;
5072 net_stats
->rx_errors
= net_stats
->rx_length_errors
+
5073 net_stats
->rx_over_errors
+ net_stats
->rx_frame_errors
+
5074 net_stats
->rx_crc_errors
;
5076 net_stats
->tx_aborted_errors
=
5077 (unsigned long) (stats_blk
->stat_Dot3StatsExcessiveCollisions
+
5078 stats_blk
->stat_Dot3StatsLateCollisions
);
5080 if ((CHIP_NUM(bp
) == CHIP_NUM_5706
) ||
5081 (CHIP_ID(bp
) == CHIP_ID_5708_A0
))
5082 net_stats
->tx_carrier_errors
= 0;
5084 net_stats
->tx_carrier_errors
=
5086 stats_blk
->stat_Dot3StatsCarrierSenseErrors
;
5089 net_stats
->tx_errors
=
5091 stats_blk
->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5093 net_stats
->tx_aborted_errors
+
5094 net_stats
->tx_carrier_errors
;
5096 net_stats
->rx_missed_errors
=
5097 (unsigned long) (stats_blk
->stat_IfInMBUFDiscards
+
5098 stats_blk
->stat_FwRxDrop
);
5103 /* All ethtool functions called with rtnl_lock */
5106 bnx2_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
5108 struct bnx2
*bp
= netdev_priv(dev
);
5110 cmd
->supported
= SUPPORTED_Autoneg
;
5111 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
5112 cmd
->supported
|= SUPPORTED_1000baseT_Full
|
5114 if (bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
)
5115 cmd
->supported
|= SUPPORTED_2500baseX_Full
;
5117 cmd
->port
= PORT_FIBRE
;
5120 cmd
->supported
|= SUPPORTED_10baseT_Half
|
5121 SUPPORTED_10baseT_Full
|
5122 SUPPORTED_100baseT_Half
|
5123 SUPPORTED_100baseT_Full
|
5124 SUPPORTED_1000baseT_Full
|
5127 cmd
->port
= PORT_TP
;
5130 cmd
->advertising
= bp
->advertising
;
5132 if (bp
->autoneg
& AUTONEG_SPEED
) {
5133 cmd
->autoneg
= AUTONEG_ENABLE
;
5136 cmd
->autoneg
= AUTONEG_DISABLE
;
5139 if (netif_carrier_ok(dev
)) {
5140 cmd
->speed
= bp
->line_speed
;
5141 cmd
->duplex
= bp
->duplex
;
5148 cmd
->transceiver
= XCVR_INTERNAL
;
5149 cmd
->phy_address
= bp
->phy_addr
;
5155 bnx2_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
5157 struct bnx2
*bp
= netdev_priv(dev
);
5158 u8 autoneg
= bp
->autoneg
;
5159 u8 req_duplex
= bp
->req_duplex
;
5160 u16 req_line_speed
= bp
->req_line_speed
;
5161 u32 advertising
= bp
->advertising
;
5163 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
5164 autoneg
|= AUTONEG_SPEED
;
5166 cmd
->advertising
&= ETHTOOL_ALL_COPPER_SPEED
;
5168 /* allow advertising 1 speed */
5169 if ((cmd
->advertising
== ADVERTISED_10baseT_Half
) ||
5170 (cmd
->advertising
== ADVERTISED_10baseT_Full
) ||
5171 (cmd
->advertising
== ADVERTISED_100baseT_Half
) ||
5172 (cmd
->advertising
== ADVERTISED_100baseT_Full
)) {
5174 if (bp
->phy_flags
& PHY_SERDES_FLAG
)
5177 advertising
= cmd
->advertising
;
5179 } else if (cmd
->advertising
== ADVERTISED_2500baseX_Full
) {
5180 if (!(bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
))
5182 } else if (cmd
->advertising
== ADVERTISED_1000baseT_Full
) {
5183 advertising
= cmd
->advertising
;
5185 else if (cmd
->advertising
== ADVERTISED_1000baseT_Half
) {
5189 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
5190 advertising
= ETHTOOL_ALL_FIBRE_SPEED
;
5193 advertising
= ETHTOOL_ALL_COPPER_SPEED
;
5196 advertising
|= ADVERTISED_Autoneg
;
5199 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
5200 if ((cmd
->speed
!= SPEED_1000
&&
5201 cmd
->speed
!= SPEED_2500
) ||
5202 (cmd
->duplex
!= DUPLEX_FULL
))
5205 if (cmd
->speed
== SPEED_2500
&&
5206 !(bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
))
5209 else if (cmd
->speed
== SPEED_1000
) {
5212 autoneg
&= ~AUTONEG_SPEED
;
5213 req_line_speed
= cmd
->speed
;
5214 req_duplex
= cmd
->duplex
;
5218 bp
->autoneg
= autoneg
;
5219 bp
->advertising
= advertising
;
5220 bp
->req_line_speed
= req_line_speed
;
5221 bp
->req_duplex
= req_duplex
;
5223 spin_lock_bh(&bp
->phy_lock
);
5227 spin_unlock_bh(&bp
->phy_lock
);
5233 bnx2_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
5235 struct bnx2
*bp
= netdev_priv(dev
);
5237 strcpy(info
->driver
, DRV_MODULE_NAME
);
5238 strcpy(info
->version
, DRV_MODULE_VERSION
);
5239 strcpy(info
->bus_info
, pci_name(bp
->pdev
));
5240 info
->fw_version
[0] = ((bp
->fw_ver
& 0xff000000) >> 24) + '0';
5241 info
->fw_version
[2] = ((bp
->fw_ver
& 0xff0000) >> 16) + '0';
5242 info
->fw_version
[4] = ((bp
->fw_ver
& 0xff00) >> 8) + '0';
5243 info
->fw_version
[1] = info
->fw_version
[3] = '.';
5244 info
->fw_version
[5] = 0;
5247 #define BNX2_REGDUMP_LEN (32 * 1024)
5250 bnx2_get_regs_len(struct net_device
*dev
)
5252 return BNX2_REGDUMP_LEN
;
5256 bnx2_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
, void *_p
)
5258 u32
*p
= _p
, i
, offset
;
5260 struct bnx2
*bp
= netdev_priv(dev
);
5261 u32 reg_boundaries
[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5262 0x0800, 0x0880, 0x0c00, 0x0c10,
5263 0x0c30, 0x0d08, 0x1000, 0x101c,
5264 0x1040, 0x1048, 0x1080, 0x10a4,
5265 0x1400, 0x1490, 0x1498, 0x14f0,
5266 0x1500, 0x155c, 0x1580, 0x15dc,
5267 0x1600, 0x1658, 0x1680, 0x16d8,
5268 0x1800, 0x1820, 0x1840, 0x1854,
5269 0x1880, 0x1894, 0x1900, 0x1984,
5270 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5271 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5272 0x2000, 0x2030, 0x23c0, 0x2400,
5273 0x2800, 0x2820, 0x2830, 0x2850,
5274 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5275 0x3c00, 0x3c94, 0x4000, 0x4010,
5276 0x4080, 0x4090, 0x43c0, 0x4458,
5277 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5278 0x4fc0, 0x5010, 0x53c0, 0x5444,
5279 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5280 0x5fc0, 0x6000, 0x6400, 0x6428,
5281 0x6800, 0x6848, 0x684c, 0x6860,
5282 0x6888, 0x6910, 0x8000 };
5286 memset(p
, 0, BNX2_REGDUMP_LEN
);
5288 if (!netif_running(bp
->dev
))
5292 offset
= reg_boundaries
[0];
5294 while (offset
< BNX2_REGDUMP_LEN
) {
5295 *p
++ = REG_RD(bp
, offset
);
5297 if (offset
== reg_boundaries
[i
+ 1]) {
5298 offset
= reg_boundaries
[i
+ 2];
5299 p
= (u32
*) (orig_p
+ offset
);
5306 bnx2_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
5308 struct bnx2
*bp
= netdev_priv(dev
);
5310 if (bp
->flags
& NO_WOL_FLAG
) {
5315 wol
->supported
= WAKE_MAGIC
;
5317 wol
->wolopts
= WAKE_MAGIC
;
5321 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
5325 bnx2_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
5327 struct bnx2
*bp
= netdev_priv(dev
);
5329 if (wol
->wolopts
& ~WAKE_MAGIC
)
5332 if (wol
->wolopts
& WAKE_MAGIC
) {
5333 if (bp
->flags
& NO_WOL_FLAG
)
5345 bnx2_nway_reset(struct net_device
*dev
)
5347 struct bnx2
*bp
= netdev_priv(dev
);
5350 if (!(bp
->autoneg
& AUTONEG_SPEED
)) {
5354 spin_lock_bh(&bp
->phy_lock
);
5356 /* Force a link down visible on the other side */
5357 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
5358 bnx2_write_phy(bp
, bp
->mii_bmcr
, BMCR_LOOPBACK
);
5359 spin_unlock_bh(&bp
->phy_lock
);
5363 spin_lock_bh(&bp
->phy_lock
);
5365 bp
->current_interval
= SERDES_AN_TIMEOUT
;
5366 bp
->serdes_an_pending
= 1;
5367 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
5370 bnx2_read_phy(bp
, bp
->mii_bmcr
, &bmcr
);
5371 bmcr
&= ~BMCR_LOOPBACK
;
5372 bnx2_write_phy(bp
, bp
->mii_bmcr
, bmcr
| BMCR_ANRESTART
| BMCR_ANENABLE
);
5374 spin_unlock_bh(&bp
->phy_lock
);
5380 bnx2_get_eeprom_len(struct net_device
*dev
)
5382 struct bnx2
*bp
= netdev_priv(dev
);
5384 if (bp
->flash_info
== NULL
)
5387 return (int) bp
->flash_size
;
5391 bnx2_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
5394 struct bnx2
*bp
= netdev_priv(dev
);
5397 /* parameters already validated in ethtool_get_eeprom */
5399 rc
= bnx2_nvram_read(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
5405 bnx2_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
5408 struct bnx2
*bp
= netdev_priv(dev
);
5411 /* parameters already validated in ethtool_set_eeprom */
5413 rc
= bnx2_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
5419 bnx2_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*coal
)
5421 struct bnx2
*bp
= netdev_priv(dev
);
5423 memset(coal
, 0, sizeof(struct ethtool_coalesce
));
5425 coal
->rx_coalesce_usecs
= bp
->rx_ticks
;
5426 coal
->rx_max_coalesced_frames
= bp
->rx_quick_cons_trip
;
5427 coal
->rx_coalesce_usecs_irq
= bp
->rx_ticks_int
;
5428 coal
->rx_max_coalesced_frames_irq
= bp
->rx_quick_cons_trip_int
;
5430 coal
->tx_coalesce_usecs
= bp
->tx_ticks
;
5431 coal
->tx_max_coalesced_frames
= bp
->tx_quick_cons_trip
;
5432 coal
->tx_coalesce_usecs_irq
= bp
->tx_ticks_int
;
5433 coal
->tx_max_coalesced_frames_irq
= bp
->tx_quick_cons_trip_int
;
5435 coal
->stats_block_coalesce_usecs
= bp
->stats_ticks
;
5441 bnx2_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*coal
)
5443 struct bnx2
*bp
= netdev_priv(dev
);
5445 bp
->rx_ticks
= (u16
) coal
->rx_coalesce_usecs
;
5446 if (bp
->rx_ticks
> 0x3ff) bp
->rx_ticks
= 0x3ff;
5448 bp
->rx_quick_cons_trip
= (u16
) coal
->rx_max_coalesced_frames
;
5449 if (bp
->rx_quick_cons_trip
> 0xff) bp
->rx_quick_cons_trip
= 0xff;
5451 bp
->rx_ticks_int
= (u16
) coal
->rx_coalesce_usecs_irq
;
5452 if (bp
->rx_ticks_int
> 0x3ff) bp
->rx_ticks_int
= 0x3ff;
5454 bp
->rx_quick_cons_trip_int
= (u16
) coal
->rx_max_coalesced_frames_irq
;
5455 if (bp
->rx_quick_cons_trip_int
> 0xff)
5456 bp
->rx_quick_cons_trip_int
= 0xff;
5458 bp
->tx_ticks
= (u16
) coal
->tx_coalesce_usecs
;
5459 if (bp
->tx_ticks
> 0x3ff) bp
->tx_ticks
= 0x3ff;
5461 bp
->tx_quick_cons_trip
= (u16
) coal
->tx_max_coalesced_frames
;
5462 if (bp
->tx_quick_cons_trip
> 0xff) bp
->tx_quick_cons_trip
= 0xff;
5464 bp
->tx_ticks_int
= (u16
) coal
->tx_coalesce_usecs_irq
;
5465 if (bp
->tx_ticks_int
> 0x3ff) bp
->tx_ticks_int
= 0x3ff;
5467 bp
->tx_quick_cons_trip_int
= (u16
) coal
->tx_max_coalesced_frames_irq
;
5468 if (bp
->tx_quick_cons_trip_int
> 0xff) bp
->tx_quick_cons_trip_int
=
5471 bp
->stats_ticks
= coal
->stats_block_coalesce_usecs
;
5472 if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
5473 if (bp
->stats_ticks
!= 0 && bp
->stats_ticks
!= USEC_PER_SEC
)
5474 bp
->stats_ticks
= USEC_PER_SEC
;
5476 if (bp
->stats_ticks
> 0xffff00) bp
->stats_ticks
= 0xffff00;
5477 bp
->stats_ticks
&= 0xffff00;
5479 if (netif_running(bp
->dev
)) {
5480 bnx2_netif_stop(bp
);
5482 bnx2_netif_start(bp
);
5489 bnx2_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
5491 struct bnx2
*bp
= netdev_priv(dev
);
5493 ering
->rx_max_pending
= MAX_TOTAL_RX_DESC_CNT
;
5494 ering
->rx_mini_max_pending
= 0;
5495 ering
->rx_jumbo_max_pending
= 0;
5497 ering
->rx_pending
= bp
->rx_ring_size
;
5498 ering
->rx_mini_pending
= 0;
5499 ering
->rx_jumbo_pending
= 0;
5501 ering
->tx_max_pending
= MAX_TX_DESC_CNT
;
5502 ering
->tx_pending
= bp
->tx_ring_size
;
5506 bnx2_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
5508 struct bnx2
*bp
= netdev_priv(dev
);
5510 if ((ering
->rx_pending
> MAX_TOTAL_RX_DESC_CNT
) ||
5511 (ering
->tx_pending
> MAX_TX_DESC_CNT
) ||
5512 (ering
->tx_pending
<= MAX_SKB_FRAGS
)) {
5516 if (netif_running(bp
->dev
)) {
5517 bnx2_netif_stop(bp
);
5518 bnx2_reset_chip(bp
, BNX2_DRV_MSG_CODE_RESET
);
5523 bnx2_set_rx_ring_size(bp
, ering
->rx_pending
);
5524 bp
->tx_ring_size
= ering
->tx_pending
;
5526 if (netif_running(bp
->dev
)) {
5529 rc
= bnx2_alloc_mem(bp
);
5533 bnx2_netif_start(bp
);
5540 bnx2_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
5542 struct bnx2
*bp
= netdev_priv(dev
);
5544 epause
->autoneg
= ((bp
->autoneg
& AUTONEG_FLOW_CTRL
) != 0);
5545 epause
->rx_pause
= ((bp
->flow_ctrl
& FLOW_CTRL_RX
) != 0);
5546 epause
->tx_pause
= ((bp
->flow_ctrl
& FLOW_CTRL_TX
) != 0);
5550 bnx2_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
5552 struct bnx2
*bp
= netdev_priv(dev
);
5554 bp
->req_flow_ctrl
= 0;
5555 if (epause
->rx_pause
)
5556 bp
->req_flow_ctrl
|= FLOW_CTRL_RX
;
5557 if (epause
->tx_pause
)
5558 bp
->req_flow_ctrl
|= FLOW_CTRL_TX
;
5560 if (epause
->autoneg
) {
5561 bp
->autoneg
|= AUTONEG_FLOW_CTRL
;
5564 bp
->autoneg
&= ~AUTONEG_FLOW_CTRL
;
5567 spin_lock_bh(&bp
->phy_lock
);
5571 spin_unlock_bh(&bp
->phy_lock
);
5577 bnx2_get_rx_csum(struct net_device
*dev
)
5579 struct bnx2
*bp
= netdev_priv(dev
);
5585 bnx2_set_rx_csum(struct net_device
*dev
, u32 data
)
5587 struct bnx2
*bp
= netdev_priv(dev
);
5594 bnx2_set_tso(struct net_device
*dev
, u32 data
)
5596 struct bnx2
*bp
= netdev_priv(dev
);
5599 dev
->features
|= NETIF_F_TSO
| NETIF_F_TSO_ECN
;
5600 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
5601 dev
->features
|= NETIF_F_TSO6
;
5603 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO6
|
5608 #define BNX2_NUM_STATS 46
5611 char string
[ETH_GSTRING_LEN
];
5612 } bnx2_stats_str_arr
[BNX2_NUM_STATS
] = {
5614 { "rx_error_bytes" },
5616 { "tx_error_bytes" },
5617 { "rx_ucast_packets" },
5618 { "rx_mcast_packets" },
5619 { "rx_bcast_packets" },
5620 { "tx_ucast_packets" },
5621 { "tx_mcast_packets" },
5622 { "tx_bcast_packets" },
5623 { "tx_mac_errors" },
5624 { "tx_carrier_errors" },
5625 { "rx_crc_errors" },
5626 { "rx_align_errors" },
5627 { "tx_single_collisions" },
5628 { "tx_multi_collisions" },
5630 { "tx_excess_collisions" },
5631 { "tx_late_collisions" },
5632 { "tx_total_collisions" },
5635 { "rx_undersize_packets" },
5636 { "rx_oversize_packets" },
5637 { "rx_64_byte_packets" },
5638 { "rx_65_to_127_byte_packets" },
5639 { "rx_128_to_255_byte_packets" },
5640 { "rx_256_to_511_byte_packets" },
5641 { "rx_512_to_1023_byte_packets" },
5642 { "rx_1024_to_1522_byte_packets" },
5643 { "rx_1523_to_9022_byte_packets" },
5644 { "tx_64_byte_packets" },
5645 { "tx_65_to_127_byte_packets" },
5646 { "tx_128_to_255_byte_packets" },
5647 { "tx_256_to_511_byte_packets" },
5648 { "tx_512_to_1023_byte_packets" },
5649 { "tx_1024_to_1522_byte_packets" },
5650 { "tx_1523_to_9022_byte_packets" },
5651 { "rx_xon_frames" },
5652 { "rx_xoff_frames" },
5653 { "tx_xon_frames" },
5654 { "tx_xoff_frames" },
5655 { "rx_mac_ctrl_frames" },
5656 { "rx_filtered_packets" },
5658 { "rx_fw_discards" },
5661 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5663 static const unsigned long bnx2_stats_offset_arr
[BNX2_NUM_STATS
] = {
5664 STATS_OFFSET32(stat_IfHCInOctets_hi
),
5665 STATS_OFFSET32(stat_IfHCInBadOctets_hi
),
5666 STATS_OFFSET32(stat_IfHCOutOctets_hi
),
5667 STATS_OFFSET32(stat_IfHCOutBadOctets_hi
),
5668 STATS_OFFSET32(stat_IfHCInUcastPkts_hi
),
5669 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi
),
5670 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi
),
5671 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi
),
5672 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi
),
5673 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi
),
5674 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors
),
5675 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors
),
5676 STATS_OFFSET32(stat_Dot3StatsFCSErrors
),
5677 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors
),
5678 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames
),
5679 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames
),
5680 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions
),
5681 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions
),
5682 STATS_OFFSET32(stat_Dot3StatsLateCollisions
),
5683 STATS_OFFSET32(stat_EtherStatsCollisions
),
5684 STATS_OFFSET32(stat_EtherStatsFragments
),
5685 STATS_OFFSET32(stat_EtherStatsJabbers
),
5686 STATS_OFFSET32(stat_EtherStatsUndersizePkts
),
5687 STATS_OFFSET32(stat_EtherStatsOverrsizePkts
),
5688 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets
),
5689 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets
),
5690 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets
),
5691 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets
),
5692 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets
),
5693 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets
),
5694 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets
),
5695 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets
),
5696 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets
),
5697 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets
),
5698 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets
),
5699 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets
),
5700 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets
),
5701 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets
),
5702 STATS_OFFSET32(stat_XonPauseFramesReceived
),
5703 STATS_OFFSET32(stat_XoffPauseFramesReceived
),
5704 STATS_OFFSET32(stat_OutXonSent
),
5705 STATS_OFFSET32(stat_OutXoffSent
),
5706 STATS_OFFSET32(stat_MacControlFramesReceived
),
5707 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards
),
5708 STATS_OFFSET32(stat_IfInMBUFDiscards
),
5709 STATS_OFFSET32(stat_FwRxDrop
),
5712 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5713 * skipped because of errata.
5715 static u8 bnx2_5706_stats_len_arr
[BNX2_NUM_STATS
] = {
5716 8,0,8,8,8,8,8,8,8,8,
5717 4,0,4,4,4,4,4,4,4,4,
5718 4,4,4,4,4,4,4,4,4,4,
5719 4,4,4,4,4,4,4,4,4,4,
5723 static u8 bnx2_5708_stats_len_arr
[BNX2_NUM_STATS
] = {
5724 8,0,8,8,8,8,8,8,8,8,
5725 4,4,4,4,4,4,4,4,4,4,
5726 4,4,4,4,4,4,4,4,4,4,
5727 4,4,4,4,4,4,4,4,4,4,
5731 #define BNX2_NUM_TESTS 6
5734 char string
[ETH_GSTRING_LEN
];
5735 } bnx2_tests_str_arr
[BNX2_NUM_TESTS
] = {
5736 { "register_test (offline)" },
5737 { "memory_test (offline)" },
5738 { "loopback_test (offline)" },
5739 { "nvram_test (online)" },
5740 { "interrupt_test (online)" },
5741 { "link_test (online)" },
5745 bnx2_self_test_count(struct net_device
*dev
)
5747 return BNX2_NUM_TESTS
;
5751 bnx2_self_test(struct net_device
*dev
, struct ethtool_test
*etest
, u64
*buf
)
5753 struct bnx2
*bp
= netdev_priv(dev
);
5755 memset(buf
, 0, sizeof(u64
) * BNX2_NUM_TESTS
);
5756 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
5759 bnx2_netif_stop(bp
);
5760 bnx2_reset_chip(bp
, BNX2_DRV_MSG_CODE_DIAG
);
5763 if (bnx2_test_registers(bp
) != 0) {
5765 etest
->flags
|= ETH_TEST_FL_FAILED
;
5767 if (bnx2_test_memory(bp
) != 0) {
5769 etest
->flags
|= ETH_TEST_FL_FAILED
;
5771 if ((buf
[2] = bnx2_test_loopback(bp
)) != 0)
5772 etest
->flags
|= ETH_TEST_FL_FAILED
;
5774 if (!netif_running(bp
->dev
)) {
5775 bnx2_reset_chip(bp
, BNX2_DRV_MSG_CODE_RESET
);
5779 bnx2_netif_start(bp
);
5782 /* wait for link up */
5783 for (i
= 0; i
< 7; i
++) {
5786 msleep_interruptible(1000);
5790 if (bnx2_test_nvram(bp
) != 0) {
5792 etest
->flags
|= ETH_TEST_FL_FAILED
;
5794 if (bnx2_test_intr(bp
) != 0) {
5796 etest
->flags
|= ETH_TEST_FL_FAILED
;
5799 if (bnx2_test_link(bp
) != 0) {
5801 etest
->flags
|= ETH_TEST_FL_FAILED
;
5807 bnx2_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
5809 switch (stringset
) {
5811 memcpy(buf
, bnx2_stats_str_arr
,
5812 sizeof(bnx2_stats_str_arr
));
5815 memcpy(buf
, bnx2_tests_str_arr
,
5816 sizeof(bnx2_tests_str_arr
));
5822 bnx2_get_stats_count(struct net_device
*dev
)
5824 return BNX2_NUM_STATS
;
5828 bnx2_get_ethtool_stats(struct net_device
*dev
,
5829 struct ethtool_stats
*stats
, u64
*buf
)
5831 struct bnx2
*bp
= netdev_priv(dev
);
5833 u32
*hw_stats
= (u32
*) bp
->stats_blk
;
5834 u8
*stats_len_arr
= NULL
;
5836 if (hw_stats
== NULL
) {
5837 memset(buf
, 0, sizeof(u64
) * BNX2_NUM_STATS
);
5841 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
5842 (CHIP_ID(bp
) == CHIP_ID_5706_A1
) ||
5843 (CHIP_ID(bp
) == CHIP_ID_5706_A2
) ||
5844 (CHIP_ID(bp
) == CHIP_ID_5708_A0
))
5845 stats_len_arr
= bnx2_5706_stats_len_arr
;
5847 stats_len_arr
= bnx2_5708_stats_len_arr
;
5849 for (i
= 0; i
< BNX2_NUM_STATS
; i
++) {
5850 if (stats_len_arr
[i
] == 0) {
5851 /* skip this counter */
5855 if (stats_len_arr
[i
] == 4) {
5856 /* 4-byte counter */
5858 *(hw_stats
+ bnx2_stats_offset_arr
[i
]);
5861 /* 8-byte counter */
5862 buf
[i
] = (((u64
) *(hw_stats
+
5863 bnx2_stats_offset_arr
[i
])) << 32) +
5864 *(hw_stats
+ bnx2_stats_offset_arr
[i
] + 1);
5869 bnx2_phys_id(struct net_device
*dev
, u32 data
)
5871 struct bnx2
*bp
= netdev_priv(dev
);
5878 save
= REG_RD(bp
, BNX2_MISC_CFG
);
5879 REG_WR(bp
, BNX2_MISC_CFG
, BNX2_MISC_CFG_LEDMODE_MAC
);
5881 for (i
= 0; i
< (data
* 2); i
++) {
5883 REG_WR(bp
, BNX2_EMAC_LED
, BNX2_EMAC_LED_OVERRIDE
);
5886 REG_WR(bp
, BNX2_EMAC_LED
, BNX2_EMAC_LED_OVERRIDE
|
5887 BNX2_EMAC_LED_1000MB_OVERRIDE
|
5888 BNX2_EMAC_LED_100MB_OVERRIDE
|
5889 BNX2_EMAC_LED_10MB_OVERRIDE
|
5890 BNX2_EMAC_LED_TRAFFIC_OVERRIDE
|
5891 BNX2_EMAC_LED_TRAFFIC
);
5893 msleep_interruptible(500);
5894 if (signal_pending(current
))
5897 REG_WR(bp
, BNX2_EMAC_LED
, 0);
5898 REG_WR(bp
, BNX2_MISC_CFG
, save
);
5903 bnx2_set_tx_csum(struct net_device
*dev
, u32 data
)
5905 struct bnx2
*bp
= netdev_priv(dev
);
5907 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
5908 return (ethtool_op_set_tx_hw_csum(dev
, data
));
5910 return (ethtool_op_set_tx_csum(dev
, data
));
5913 static const struct ethtool_ops bnx2_ethtool_ops
= {
5914 .get_settings
= bnx2_get_settings
,
5915 .set_settings
= bnx2_set_settings
,
5916 .get_drvinfo
= bnx2_get_drvinfo
,
5917 .get_regs_len
= bnx2_get_regs_len
,
5918 .get_regs
= bnx2_get_regs
,
5919 .get_wol
= bnx2_get_wol
,
5920 .set_wol
= bnx2_set_wol
,
5921 .nway_reset
= bnx2_nway_reset
,
5922 .get_link
= ethtool_op_get_link
,
5923 .get_eeprom_len
= bnx2_get_eeprom_len
,
5924 .get_eeprom
= bnx2_get_eeprom
,
5925 .set_eeprom
= bnx2_set_eeprom
,
5926 .get_coalesce
= bnx2_get_coalesce
,
5927 .set_coalesce
= bnx2_set_coalesce
,
5928 .get_ringparam
= bnx2_get_ringparam
,
5929 .set_ringparam
= bnx2_set_ringparam
,
5930 .get_pauseparam
= bnx2_get_pauseparam
,
5931 .set_pauseparam
= bnx2_set_pauseparam
,
5932 .get_rx_csum
= bnx2_get_rx_csum
,
5933 .set_rx_csum
= bnx2_set_rx_csum
,
5934 .get_tx_csum
= ethtool_op_get_tx_csum
,
5935 .set_tx_csum
= bnx2_set_tx_csum
,
5936 .get_sg
= ethtool_op_get_sg
,
5937 .set_sg
= ethtool_op_set_sg
,
5938 .get_tso
= ethtool_op_get_tso
,
5939 .set_tso
= bnx2_set_tso
,
5940 .self_test_count
= bnx2_self_test_count
,
5941 .self_test
= bnx2_self_test
,
5942 .get_strings
= bnx2_get_strings
,
5943 .phys_id
= bnx2_phys_id
,
5944 .get_stats_count
= bnx2_get_stats_count
,
5945 .get_ethtool_stats
= bnx2_get_ethtool_stats
,
5946 .get_perm_addr
= ethtool_op_get_perm_addr
,
5949 /* Called with rtnl_lock */
5951 bnx2_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
5953 struct mii_ioctl_data
*data
= if_mii(ifr
);
5954 struct bnx2
*bp
= netdev_priv(dev
);
5959 data
->phy_id
= bp
->phy_addr
;
5965 if (!netif_running(dev
))
5968 spin_lock_bh(&bp
->phy_lock
);
5969 err
= bnx2_read_phy(bp
, data
->reg_num
& 0x1f, &mii_regval
);
5970 spin_unlock_bh(&bp
->phy_lock
);
5972 data
->val_out
= mii_regval
;
5978 if (!capable(CAP_NET_ADMIN
))
5981 if (!netif_running(dev
))
5984 spin_lock_bh(&bp
->phy_lock
);
5985 err
= bnx2_write_phy(bp
, data
->reg_num
& 0x1f, data
->val_in
);
5986 spin_unlock_bh(&bp
->phy_lock
);
5997 /* Called with rtnl_lock */
5999 bnx2_change_mac_addr(struct net_device
*dev
, void *p
)
6001 struct sockaddr
*addr
= p
;
6002 struct bnx2
*bp
= netdev_priv(dev
);
6004 if (!is_valid_ether_addr(addr
->sa_data
))
6007 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
6008 if (netif_running(dev
))
6009 bnx2_set_mac_addr(bp
);
6014 /* Called with rtnl_lock */
6016 bnx2_change_mtu(struct net_device
*dev
, int new_mtu
)
6018 struct bnx2
*bp
= netdev_priv(dev
);
6020 if (((new_mtu
+ ETH_HLEN
) > MAX_ETHERNET_JUMBO_PACKET_SIZE
) ||
6021 ((new_mtu
+ ETH_HLEN
) < MIN_ETHERNET_PACKET_SIZE
))
6025 if (netif_running(dev
)) {
6026 bnx2_netif_stop(bp
);
6030 bnx2_netif_start(bp
);
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the device IRQ masked so
 * netconsole/kgdb-over-ethernet can make progress without interrupts.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6047 static void __devinit
6048 bnx2_get_5709_media(struct bnx2
*bp
)
6050 u32 val
= REG_RD(bp
, BNX2_MISC_DUAL_MEDIA_CTRL
);
6051 u32 bond_id
= val
& BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID
;
6054 if (bond_id
== BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C
)
6056 else if (bond_id
== BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S
) {
6057 bp
->phy_flags
|= PHY_SERDES_FLAG
;
6061 if (val
& BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE
)
6062 strap
= (val
& BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL
) >> 21;
6064 strap
= (val
& BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP
) >> 8;
6066 if (PCI_FUNC(bp
->pdev
->devfn
) == 0) {
6071 bp
->phy_flags
|= PHY_SERDES_FLAG
;
6079 bp
->phy_flags
|= PHY_SERDES_FLAG
;
6085 static void __devinit
6086 bnx2_get_pci_speed(struct bnx2
*bp
)
6090 reg
= REG_RD(bp
, BNX2_PCICFG_MISC_STATUS
);
6091 if (reg
& BNX2_PCICFG_MISC_STATUS_PCIX_DET
) {
6094 bp
->flags
|= PCIX_FLAG
;
6096 clkreg
= REG_RD(bp
, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS
);
6098 clkreg
&= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET
;
6100 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ
:
6101 bp
->bus_speed_mhz
= 133;
6104 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ
:
6105 bp
->bus_speed_mhz
= 100;
6108 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ
:
6109 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ
:
6110 bp
->bus_speed_mhz
= 66;
6113 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ
:
6114 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ
:
6115 bp
->bus_speed_mhz
= 50;
6118 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW
:
6119 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ
:
6120 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ
:
6121 bp
->bus_speed_mhz
= 33;
6126 if (reg
& BNX2_PCICFG_MISC_STATUS_M66EN
)
6127 bp
->bus_speed_mhz
= 66;
6129 bp
->bus_speed_mhz
= 33;
6132 if (reg
& BNX2_PCICFG_MISC_STATUS_32BIT_DET
)
6133 bp
->flags
|= PCI_32BIT_FLAG
;
6137 static int __devinit
6138 bnx2_init_board(struct pci_dev
*pdev
, struct net_device
*dev
)
6141 unsigned long mem_len
;
6144 u64 dma_mask
, persist_dma_mask
;
6146 SET_MODULE_OWNER(dev
);
6147 SET_NETDEV_DEV(dev
, &pdev
->dev
);
6148 bp
= netdev_priv(dev
);
6153 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6154 rc
= pci_enable_device(pdev
);
6156 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting.");
6160 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
6162 "Cannot find PCI device base address, aborting.\n");
6164 goto err_out_disable
;
6167 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
6169 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting.\n");
6170 goto err_out_disable
;
6173 pci_set_master(pdev
);
6175 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
6176 if (bp
->pm_cap
== 0) {
6178 "Cannot find power management capability, aborting.\n");
6180 goto err_out_release
;
6186 spin_lock_init(&bp
->phy_lock
);
6187 spin_lock_init(&bp
->indirect_lock
);
6188 INIT_WORK(&bp
->reset_task
, bnx2_reset_task
);
6190 dev
->base_addr
= dev
->mem_start
= pci_resource_start(pdev
, 0);
6191 mem_len
= MB_GET_CID_ADDR(TX_TSS_CID
+ 1);
6192 dev
->mem_end
= dev
->mem_start
+ mem_len
;
6193 dev
->irq
= pdev
->irq
;
6195 bp
->regview
= ioremap_nocache(dev
->base_addr
, mem_len
);
6198 dev_err(&pdev
->dev
, "Cannot map register space, aborting.\n");
6200 goto err_out_release
;
6203 /* Configure byte swap and enable write to the reg_window registers.
6204 * Rely on CPU to do target byte swapping on big endian systems
6205 * The chip's target access swapping will not swap all accesses
6207 pci_write_config_dword(bp
->pdev
, BNX2_PCICFG_MISC_CONFIG
,
6208 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA
|
6209 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP
);
6211 bnx2_set_power_state(bp
, PCI_D0
);
6213 bp
->chip_id
= REG_RD(bp
, BNX2_MISC_ID
);
6215 if (CHIP_NUM(bp
) == CHIP_NUM_5709
) {
6216 if (pci_find_capability(pdev
, PCI_CAP_ID_EXP
) == 0) {
6218 "Cannot find PCIE capability, aborting.\n");
6222 bp
->flags
|= PCIE_FLAG
;
6224 bp
->pcix_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PCIX
);
6225 if (bp
->pcix_cap
== 0) {
6227 "Cannot find PCIX capability, aborting.\n");
6233 if (CHIP_ID(bp
) != CHIP_ID_5706_A0
&& CHIP_ID(bp
) != CHIP_ID_5706_A1
) {
6234 if (pci_find_capability(pdev
, PCI_CAP_ID_MSI
))
6235 bp
->flags
|= MSI_CAP_FLAG
;
6238 /* 5708 cannot support DMA addresses > 40-bit. */
6239 if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
6240 persist_dma_mask
= dma_mask
= DMA_40BIT_MASK
;
6242 persist_dma_mask
= dma_mask
= DMA_64BIT_MASK
;
6244 /* Configure DMA attributes. */
6245 if (pci_set_dma_mask(pdev
, dma_mask
) == 0) {
6246 dev
->features
|= NETIF_F_HIGHDMA
;
6247 rc
= pci_set_consistent_dma_mask(pdev
, persist_dma_mask
);
6250 "pci_set_consistent_dma_mask failed, aborting.\n");
6253 } else if ((rc
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
)) != 0) {
6254 dev_err(&pdev
->dev
, "System does not support DMA, aborting.\n");
6258 if (!(bp
->flags
& PCIE_FLAG
))
6259 bnx2_get_pci_speed(bp
);
6261 /* 5706A0 may falsely detect SERR and PERR. */
6262 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
6263 reg
= REG_RD(bp
, PCI_COMMAND
);
6264 reg
&= ~(PCI_COMMAND_SERR
| PCI_COMMAND_PARITY
);
6265 REG_WR(bp
, PCI_COMMAND
, reg
);
6267 else if ((CHIP_ID(bp
) == CHIP_ID_5706_A1
) &&
6268 !(bp
->flags
& PCIX_FLAG
)) {
6271 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6275 bnx2_init_nvram(bp
);
6277 reg
= REG_RD_IND(bp
, BNX2_SHM_HDR_SIGNATURE
);
6279 if ((reg
& BNX2_SHM_HDR_SIGNATURE_SIG_MASK
) ==
6280 BNX2_SHM_HDR_SIGNATURE_SIG
) {
6281 u32 off
= PCI_FUNC(pdev
->devfn
) << 2;
6283 bp
->shmem_base
= REG_RD_IND(bp
, BNX2_SHM_HDR_ADDR_0
+ off
);
6285 bp
->shmem_base
= HOST_VIEW_SHMEM_BASE
;
6287 /* Get the permanent MAC address. First we need to make sure the
6288 * firmware is actually running.
6290 reg
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_DEV_INFO_SIGNATURE
);
6292 if ((reg
& BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK
) !=
6293 BNX2_DEV_INFO_SIGNATURE_MAGIC
) {
6294 dev_err(&pdev
->dev
, "Firmware not running, aborting.\n");
6299 bp
->fw_ver
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_DEV_INFO_BC_REV
);
6301 reg
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_HW_CFG_MAC_UPPER
);
6302 bp
->mac_addr
[0] = (u8
) (reg
>> 8);
6303 bp
->mac_addr
[1] = (u8
) reg
;
6305 reg
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_HW_CFG_MAC_LOWER
);
6306 bp
->mac_addr
[2] = (u8
) (reg
>> 24);
6307 bp
->mac_addr
[3] = (u8
) (reg
>> 16);
6308 bp
->mac_addr
[4] = (u8
) (reg
>> 8);
6309 bp
->mac_addr
[5] = (u8
) reg
;
6311 bp
->tx_ring_size
= MAX_TX_DESC_CNT
;
6312 bnx2_set_rx_ring_size(bp
, 255);
6316 bp
->rx_offset
= sizeof(struct l2_fhdr
) + 2;
6318 bp
->tx_quick_cons_trip_int
= 20;
6319 bp
->tx_quick_cons_trip
= 20;
6320 bp
->tx_ticks_int
= 80;
6323 bp
->rx_quick_cons_trip_int
= 6;
6324 bp
->rx_quick_cons_trip
= 6;
6325 bp
->rx_ticks_int
= 18;
6328 bp
->stats_ticks
= 1000000 & 0xffff00;
6330 bp
->timer_interval
= HZ
;
6331 bp
->current_interval
= HZ
;
6335 /* Disable WOL support if we are running on a SERDES chip. */
6336 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
6337 bnx2_get_5709_media(bp
);
6338 else if (CHIP_BOND_ID(bp
) & CHIP_BOND_ID_SERDES_BIT
)
6339 bp
->phy_flags
|= PHY_SERDES_FLAG
;
6341 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
6342 bp
->flags
|= NO_WOL_FLAG
;
6343 if (CHIP_NUM(bp
) != CHIP_NUM_5706
) {
6345 reg
= REG_RD_IND(bp
, bp
->shmem_base
+
6346 BNX2_SHARED_HW_CFG_CONFIG
);
6347 if (reg
& BNX2_SHARED_HW_CFG_PHY_2_5G
)
6348 bp
->phy_flags
|= PHY_2_5G_CAPABLE_FLAG
;
6350 } else if (CHIP_NUM(bp
) == CHIP_NUM_5706
||
6351 CHIP_NUM(bp
) == CHIP_NUM_5708
)
6352 bp
->phy_flags
|= PHY_CRC_FIX_FLAG
;
6353 else if (CHIP_ID(bp
) == CHIP_ID_5709_A0
)
6354 bp
->phy_flags
|= PHY_DIS_EARLY_DAC_FLAG
;
6356 if ((CHIP_ID(bp
) == CHIP_ID_5708_A0
) ||
6357 (CHIP_ID(bp
) == CHIP_ID_5708_B0
) ||
6358 (CHIP_ID(bp
) == CHIP_ID_5708_B1
))
6359 bp
->flags
|= NO_WOL_FLAG
;
6361 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
6362 bp
->tx_quick_cons_trip_int
=
6363 bp
->tx_quick_cons_trip
;
6364 bp
->tx_ticks_int
= bp
->tx_ticks
;
6365 bp
->rx_quick_cons_trip_int
=
6366 bp
->rx_quick_cons_trip
;
6367 bp
->rx_ticks_int
= bp
->rx_ticks
;
6368 bp
->comp_prod_trip_int
= bp
->comp_prod_trip
;
6369 bp
->com_ticks_int
= bp
->com_ticks
;
6370 bp
->cmd_ticks_int
= bp
->cmd_ticks
;
6373 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6375 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6376 * with byte enables disabled on the unused 32-bit word. This is legal
6377 * but causes problems on the AMD 8132 which will eventually stop
6378 * responding after a while.
6380 * AMD believes this incompatibility is unique to the 5706, and
6381 * prefers to locally disable MSI rather than globally disabling it.
6383 if (CHIP_NUM(bp
) == CHIP_NUM_5706
&& disable_msi
== 0) {
6384 struct pci_dev
*amd_8132
= NULL
;
6386 while ((amd_8132
= pci_get_device(PCI_VENDOR_ID_AMD
,
6387 PCI_DEVICE_ID_AMD_8132_BRIDGE
,
6391 pci_read_config_byte(amd_8132
, PCI_REVISION_ID
, &rev
);
6392 if (rev
>= 0x10 && rev
<= 0x13) {
6394 pci_dev_put(amd_8132
);
6400 bnx2_set_default_link(bp
);
6401 bp
->req_flow_ctrl
= FLOW_CTRL_RX
| FLOW_CTRL_TX
;
6403 init_timer(&bp
->timer
);
6404 bp
->timer
.expires
= RUN_AT(bp
->timer_interval
);
6405 bp
->timer
.data
= (unsigned long) bp
;
6406 bp
->timer
.function
= bnx2_timer
;
6412 iounmap(bp
->regview
);
6417 pci_release_regions(pdev
);
6420 pci_disable_device(pdev
);
6421 pci_set_drvdata(pdev
, NULL
);
6427 static char * __devinit
6428 bnx2_bus_string(struct bnx2
*bp
, char *str
)
6432 if (bp
->flags
& PCIE_FLAG
) {
6433 s
+= sprintf(s
, "PCI Express");
6435 s
+= sprintf(s
, "PCI");
6436 if (bp
->flags
& PCIX_FLAG
)
6437 s
+= sprintf(s
, "-X");
6438 if (bp
->flags
& PCI_32BIT_FLAG
)
6439 s
+= sprintf(s
, " 32-bit");
6441 s
+= sprintf(s
, " 64-bit");
6442 s
+= sprintf(s
, " %dMHz", bp
->bus_speed_mhz
);
6447 static int __devinit
6448 bnx2_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
6450 static int version_printed
= 0;
6451 struct net_device
*dev
= NULL
;
6456 if (version_printed
++ == 0)
6457 printk(KERN_INFO
"%s", version
);
6459 /* dev zeroed in init_etherdev */
6460 dev
= alloc_etherdev(sizeof(*bp
));
6465 rc
= bnx2_init_board(pdev
, dev
);
6471 dev
->open
= bnx2_open
;
6472 dev
->hard_start_xmit
= bnx2_start_xmit
;
6473 dev
->stop
= bnx2_close
;
6474 dev
->get_stats
= bnx2_get_stats
;
6475 dev
->set_multicast_list
= bnx2_set_rx_mode
;
6476 dev
->do_ioctl
= bnx2_ioctl
;
6477 dev
->set_mac_address
= bnx2_change_mac_addr
;
6478 dev
->change_mtu
= bnx2_change_mtu
;
6479 dev
->tx_timeout
= bnx2_tx_timeout
;
6480 dev
->watchdog_timeo
= TX_TIMEOUT
;
6482 dev
->vlan_rx_register
= bnx2_vlan_rx_register
;
6484 dev
->poll
= bnx2_poll
;
6485 dev
->ethtool_ops
= &bnx2_ethtool_ops
;
6488 bp
= netdev_priv(dev
);
6490 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6491 dev
->poll_controller
= poll_bnx2
;
6494 pci_set_drvdata(pdev
, dev
);
6496 memcpy(dev
->dev_addr
, bp
->mac_addr
, 6);
6497 memcpy(dev
->perm_addr
, bp
->mac_addr
, 6);
6498 bp
->name
= board_info
[ent
->driver_data
].name
;
6500 dev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_SG
;
6501 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
6502 dev
->features
|= NETIF_F_IPV6_CSUM
;
6505 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
6507 dev
->features
|= NETIF_F_TSO
| NETIF_F_TSO_ECN
;
6508 if (CHIP_NUM(bp
) == CHIP_NUM_5709
)
6509 dev
->features
|= NETIF_F_TSO6
;
6511 if ((rc
= register_netdev(dev
))) {
6512 dev_err(&pdev
->dev
, "Cannot register net device\n");
6514 iounmap(bp
->regview
);
6515 pci_release_regions(pdev
);
6516 pci_disable_device(pdev
);
6517 pci_set_drvdata(pdev
, NULL
);
6522 printk(KERN_INFO
"%s: %s (%c%d) %s found at mem %lx, "
6526 ((CHIP_ID(bp
) & 0xf000) >> 12) + 'A',
6527 ((CHIP_ID(bp
) & 0x0ff0) >> 4),
6528 bnx2_bus_string(bp
, str
),
6532 printk("node addr ");
6533 for (i
= 0; i
< 6; i
++)
6534 printk("%2.2x", dev
->dev_addr
[i
]);
6540 static void __devexit
6541 bnx2_remove_one(struct pci_dev
*pdev
)
6543 struct net_device
*dev
= pci_get_drvdata(pdev
);
6544 struct bnx2
*bp
= netdev_priv(dev
);
6546 flush_scheduled_work();
6548 unregister_netdev(dev
);
6551 iounmap(bp
->regview
);
6554 pci_release_regions(pdev
);
6555 pci_disable_device(pdev
);
6556 pci_set_drvdata(pdev
, NULL
);
6560 bnx2_suspend(struct pci_dev
*pdev
, pm_message_t state
)
6562 struct net_device
*dev
= pci_get_drvdata(pdev
);
6563 struct bnx2
*bp
= netdev_priv(dev
);
6566 if (!netif_running(dev
))
6569 flush_scheduled_work();
6570 bnx2_netif_stop(bp
);
6571 netif_device_detach(dev
);
6572 del_timer_sync(&bp
->timer
);
6573 if (bp
->flags
& NO_WOL_FLAG
)
6574 reset_code
= BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN
;
6576 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_WOL
;
6578 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL
;
6579 bnx2_reset_chip(bp
, reset_code
);
6581 pci_save_state(pdev
);
6582 bnx2_set_power_state(bp
, pci_choose_state(pdev
, state
));
6587 bnx2_resume(struct pci_dev
*pdev
)
6589 struct net_device
*dev
= pci_get_drvdata(pdev
);
6590 struct bnx2
*bp
= netdev_priv(dev
);
6592 if (!netif_running(dev
))
6595 pci_restore_state(pdev
);
6596 bnx2_set_power_state(bp
, PCI_D0
);
6597 netif_device_attach(dev
);
6599 bnx2_netif_start(bp
);
6603 static struct pci_driver bnx2_pci_driver
= {
6604 .name
= DRV_MODULE_NAME
,
6605 .id_table
= bnx2_pci_tbl
,
6606 .probe
= bnx2_init_one
,
6607 .remove
= __devexit_p(bnx2_remove_one
),
6608 .suspend
= bnx2_suspend
,
6609 .resume
= bnx2_resume
,
6612 static int __init
bnx2_init(void)
6614 return pci_register_driver(&bnx2_pci_driver
);
6617 static void __exit
bnx2_cleanup(void)
6619 pci_unregister_driver(&bnx2_pci_driver
);
6622 module_init(bnx2_init
);
6623 module_exit(bnx2_cleanup
);