1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.4.45"
60 #define DRV_MODULE_RELDATE "September 29, 2006"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version
[] __devinitdata
=
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME
" v" DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION
);
75 static int disable_msi
= 0;
77 module_param(disable_msi
, int, 0);
78 MODULE_PARM_DESC(disable_msi
, "Disable Message Signaled Interrupt (MSI)");
/* Human-readable adapter names, indexed by board_t (declared above).
 * NOTE(review): the struct header lines were missing from the mangled
 * source and were reconstructed — confirm against driver history.
 */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
103 static struct pci_device_id bnx2_pci_tbl
[] = {
104 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
105 PCI_VENDOR_ID_HP
, 0x3101, 0, 0, NC370T
},
106 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
107 PCI_VENDOR_ID_HP
, 0x3106, 0, 0, NC370I
},
108 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706
,
109 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5706
},
110 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5708
,
111 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5708
},
112 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706S
,
113 PCI_VENDOR_ID_HP
, 0x3102, 0, 0, NC370F
},
114 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5706S
,
115 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5706S
},
116 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_5708S
,
117 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM5708S
},
121 static struct flash_spec flash_table
[] =
124 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
125 1, SEEPROM_PAGE_BITS
, SEEPROM_PAGE_SIZE
,
126 SEEPROM_BYTE_ADDR_MASK
, SEEPROM_TOTAL_SIZE
,
128 /* Expansion entry 0001 */
129 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
130 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
131 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
133 /* Saifun SA25F010 (non-buffered flash) */
134 /* strap, cfg1, & write1 need updates */
135 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
137 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
*2,
138 "Non-buffered flash (128kB)"},
139 /* Saifun SA25F020 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
143 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
*4,
144 "Non-buffered flash (256kB)"},
145 /* Expansion entry 0100 */
146 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
148 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
150 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
151 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
152 0, ST_MICRO_FLASH_PAGE_BITS
, ST_MICRO_FLASH_PAGE_SIZE
,
153 ST_MICRO_FLASH_BYTE_ADDR_MASK
, ST_MICRO_FLASH_BASE_TOTAL_SIZE
*2,
154 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
155 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
156 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS
, ST_MICRO_FLASH_PAGE_SIZE
,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK
, ST_MICRO_FLASH_BASE_TOTAL_SIZE
*4,
159 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
160 /* Saifun SA25F005 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
164 SAIFUN_FLASH_BYTE_ADDR_MASK
, SAIFUN_FLASH_BASE_TOTAL_SIZE
,
165 "Non-buffered flash (64kB)"},
167 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168 1, SEEPROM_PAGE_BITS
, SEEPROM_PAGE_SIZE
,
169 SEEPROM_BYTE_ADDR_MASK
, SEEPROM_TOTAL_SIZE
,
171 /* Expansion entry 1001 */
172 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
174 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
176 /* Expansion entry 1010 */
177 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
179 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
181 /* ATMEL AT45DB011B (buffered flash) */
182 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183 1, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
184 BUFFERED_FLASH_BYTE_ADDR_MASK
, BUFFERED_FLASH_TOTAL_SIZE
,
185 "Buffered flash (128kB)"},
186 /* Expansion entry 1100 */
187 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
189 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
191 /* Expansion entry 1101 */
192 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS
, SAIFUN_FLASH_PAGE_SIZE
,
194 SAIFUN_FLASH_BYTE_ADDR_MASK
, 0,
196 /* Ateml Expansion entry 1110 */
197 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198 1, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
199 BUFFERED_FLASH_BYTE_ADDR_MASK
, 0,
200 "Entry 1110 (Atmel)"},
201 /* ATMEL AT45DB021B (buffered flash) */
202 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS
, BUFFERED_FLASH_PAGE_SIZE
,
204 BUFFERED_FLASH_BYTE_ADDR_MASK
, BUFFERED_FLASH_TOTAL_SIZE
*2,
205 "Buffered flash (256kB)"},
/* Export the PCI ID table for module autoloading. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
210 static inline u32
bnx2_tx_avail(struct bnx2
*bp
)
215 diff
= TX_RING_IDX(bp
->tx_prod
) - TX_RING_IDX(bp
->tx_cons
);
216 if (diff
> MAX_TX_DESC_CNT
)
217 diff
= (diff
& MAX_TX_DESC_CNT
) - 1;
218 return (bp
->tx_ring_size
- diff
);
222 bnx2_reg_rd_ind(struct bnx2
*bp
, u32 offset
)
224 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW_ADDRESS
, offset
);
225 return (REG_RD(bp
, BNX2_PCICFG_REG_WINDOW
));
229 bnx2_reg_wr_ind(struct bnx2
*bp
, u32 offset
, u32 val
)
231 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW_ADDRESS
, offset
);
232 REG_WR(bp
, BNX2_PCICFG_REG_WINDOW
, val
);
236 bnx2_ctx_wr(struct bnx2
*bp
, u32 cid_addr
, u32 offset
, u32 val
)
239 REG_WR(bp
, BNX2_CTX_DATA_ADR
, offset
);
240 REG_WR(bp
, BNX2_CTX_DATA
, val
);
244 bnx2_read_phy(struct bnx2
*bp
, u32 reg
, u32
*val
)
249 if (bp
->phy_flags
& PHY_INT_MODE_AUTO_POLLING_FLAG
) {
250 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
251 val1
&= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
253 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
254 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
259 val1
= (bp
->phy_addr
<< 21) | (reg
<< 16) |
260 BNX2_EMAC_MDIO_COMM_COMMAND_READ
| BNX2_EMAC_MDIO_COMM_DISEXT
|
261 BNX2_EMAC_MDIO_COMM_START_BUSY
;
262 REG_WR(bp
, BNX2_EMAC_MDIO_COMM
, val1
);
264 for (i
= 0; i
< 50; i
++) {
267 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
268 if (!(val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)) {
271 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
272 val1
&= BNX2_EMAC_MDIO_COMM_DATA
;
278 if (val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
) {
287 if (bp
->phy_flags
& PHY_INT_MODE_AUTO_POLLING_FLAG
) {
288 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
289 val1
|= BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
291 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
292 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
301 bnx2_write_phy(struct bnx2
*bp
, u32 reg
, u32 val
)
306 if (bp
->phy_flags
& PHY_INT_MODE_AUTO_POLLING_FLAG
) {
307 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
308 val1
&= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
310 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
311 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
316 val1
= (bp
->phy_addr
<< 21) | (reg
<< 16) | val
|
317 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE
|
318 BNX2_EMAC_MDIO_COMM_START_BUSY
| BNX2_EMAC_MDIO_COMM_DISEXT
;
319 REG_WR(bp
, BNX2_EMAC_MDIO_COMM
, val1
);
321 for (i
= 0; i
< 50; i
++) {
324 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_COMM
);
325 if (!(val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)) {
331 if (val1
& BNX2_EMAC_MDIO_COMM_START_BUSY
)
336 if (bp
->phy_flags
& PHY_INT_MODE_AUTO_POLLING_FLAG
) {
337 val1
= REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
338 val1
|= BNX2_EMAC_MDIO_MODE_AUTO_POLL
;
340 REG_WR(bp
, BNX2_EMAC_MDIO_MODE
, val1
);
341 REG_RD(bp
, BNX2_EMAC_MDIO_MODE
);
350 bnx2_disable_int(struct bnx2
*bp
)
352 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
353 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
354 REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
);
358 bnx2_enable_int(struct bnx2
*bp
)
360 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
361 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
362 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
| bp
->last_status_idx
);
364 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
365 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
| bp
->last_status_idx
);
367 REG_WR(bp
, BNX2_HC_COMMAND
, bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW
);
371 bnx2_disable_int_sync(struct bnx2
*bp
)
373 atomic_inc(&bp
->intr_sem
);
374 bnx2_disable_int(bp
);
375 synchronize_irq(bp
->pdev
->irq
);
379 bnx2_netif_stop(struct bnx2
*bp
)
381 bnx2_disable_int_sync(bp
);
382 if (netif_running(bp
->dev
)) {
383 netif_poll_disable(bp
->dev
);
384 netif_tx_disable(bp
->dev
);
385 bp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
390 bnx2_netif_start(struct bnx2
*bp
)
392 if (atomic_dec_and_test(&bp
->intr_sem
)) {
393 if (netif_running(bp
->dev
)) {
394 netif_wake_queue(bp
->dev
);
395 netif_poll_enable(bp
->dev
);
402 bnx2_free_mem(struct bnx2
*bp
)
406 if (bp
->status_blk
) {
407 pci_free_consistent(bp
->pdev
, bp
->status_stats_size
,
408 bp
->status_blk
, bp
->status_blk_mapping
);
409 bp
->status_blk
= NULL
;
410 bp
->stats_blk
= NULL
;
412 if (bp
->tx_desc_ring
) {
413 pci_free_consistent(bp
->pdev
,
414 sizeof(struct tx_bd
) * TX_DESC_CNT
,
415 bp
->tx_desc_ring
, bp
->tx_desc_mapping
);
416 bp
->tx_desc_ring
= NULL
;
418 kfree(bp
->tx_buf_ring
);
419 bp
->tx_buf_ring
= NULL
;
420 for (i
= 0; i
< bp
->rx_max_ring
; i
++) {
421 if (bp
->rx_desc_ring
[i
])
422 pci_free_consistent(bp
->pdev
,
423 sizeof(struct rx_bd
) * RX_DESC_CNT
,
425 bp
->rx_desc_mapping
[i
]);
426 bp
->rx_desc_ring
[i
] = NULL
;
428 vfree(bp
->rx_buf_ring
);
429 bp
->rx_buf_ring
= NULL
;
433 bnx2_alloc_mem(struct bnx2
*bp
)
435 int i
, status_blk_size
;
437 bp
->tx_buf_ring
= kzalloc(sizeof(struct sw_bd
) * TX_DESC_CNT
,
439 if (bp
->tx_buf_ring
== NULL
)
442 bp
->tx_desc_ring
= pci_alloc_consistent(bp
->pdev
,
443 sizeof(struct tx_bd
) *
445 &bp
->tx_desc_mapping
);
446 if (bp
->tx_desc_ring
== NULL
)
449 bp
->rx_buf_ring
= vmalloc(sizeof(struct sw_bd
) * RX_DESC_CNT
*
451 if (bp
->rx_buf_ring
== NULL
)
454 memset(bp
->rx_buf_ring
, 0, sizeof(struct sw_bd
) * RX_DESC_CNT
*
457 for (i
= 0; i
< bp
->rx_max_ring
; i
++) {
458 bp
->rx_desc_ring
[i
] =
459 pci_alloc_consistent(bp
->pdev
,
460 sizeof(struct rx_bd
) * RX_DESC_CNT
,
461 &bp
->rx_desc_mapping
[i
]);
462 if (bp
->rx_desc_ring
[i
] == NULL
)
467 /* Combine status and statistics blocks into one allocation. */
468 status_blk_size
= L1_CACHE_ALIGN(sizeof(struct status_block
));
469 bp
->status_stats_size
= status_blk_size
+
470 sizeof(struct statistics_block
);
472 bp
->status_blk
= pci_alloc_consistent(bp
->pdev
, bp
->status_stats_size
,
473 &bp
->status_blk_mapping
);
474 if (bp
->status_blk
== NULL
)
477 memset(bp
->status_blk
, 0, bp
->status_stats_size
);
479 bp
->stats_blk
= (void *) ((unsigned long) bp
->status_blk
+
482 bp
->stats_blk_mapping
= bp
->status_blk_mapping
+ status_blk_size
;
492 bnx2_report_fw_link(struct bnx2
*bp
)
494 u32 fw_link_status
= 0;
499 switch (bp
->line_speed
) {
501 if (bp
->duplex
== DUPLEX_HALF
)
502 fw_link_status
= BNX2_LINK_STATUS_10HALF
;
504 fw_link_status
= BNX2_LINK_STATUS_10FULL
;
507 if (bp
->duplex
== DUPLEX_HALF
)
508 fw_link_status
= BNX2_LINK_STATUS_100HALF
;
510 fw_link_status
= BNX2_LINK_STATUS_100FULL
;
513 if (bp
->duplex
== DUPLEX_HALF
)
514 fw_link_status
= BNX2_LINK_STATUS_1000HALF
;
516 fw_link_status
= BNX2_LINK_STATUS_1000FULL
;
519 if (bp
->duplex
== DUPLEX_HALF
)
520 fw_link_status
= BNX2_LINK_STATUS_2500HALF
;
522 fw_link_status
= BNX2_LINK_STATUS_2500FULL
;
526 fw_link_status
|= BNX2_LINK_STATUS_LINK_UP
;
529 fw_link_status
|= BNX2_LINK_STATUS_AN_ENABLED
;
531 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
532 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
534 if (!(bmsr
& BMSR_ANEGCOMPLETE
) ||
535 bp
->phy_flags
& PHY_PARALLEL_DETECT_FLAG
)
536 fw_link_status
|= BNX2_LINK_STATUS_PARALLEL_DET
;
538 fw_link_status
|= BNX2_LINK_STATUS_AN_COMPLETE
;
542 fw_link_status
= BNX2_LINK_STATUS_LINK_DOWN
;
544 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_LINK_STATUS
, fw_link_status
);
548 bnx2_report_link(struct bnx2
*bp
)
551 netif_carrier_on(bp
->dev
);
552 printk(KERN_INFO PFX
"%s NIC Link is Up, ", bp
->dev
->name
);
554 printk("%d Mbps ", bp
->line_speed
);
556 if (bp
->duplex
== DUPLEX_FULL
)
557 printk("full duplex");
559 printk("half duplex");
562 if (bp
->flow_ctrl
& FLOW_CTRL_RX
) {
563 printk(", receive ");
564 if (bp
->flow_ctrl
& FLOW_CTRL_TX
)
565 printk("& transmit ");
568 printk(", transmit ");
570 printk("flow control ON");
575 netif_carrier_off(bp
->dev
);
576 printk(KERN_ERR PFX
"%s NIC Link is Down\n", bp
->dev
->name
);
579 bnx2_report_fw_link(bp
);
583 bnx2_resolve_flow_ctrl(struct bnx2
*bp
)
585 u32 local_adv
, remote_adv
;
588 if ((bp
->autoneg
& (AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
)) !=
589 (AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
)) {
591 if (bp
->duplex
== DUPLEX_FULL
) {
592 bp
->flow_ctrl
= bp
->req_flow_ctrl
;
597 if (bp
->duplex
!= DUPLEX_FULL
) {
601 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
602 (CHIP_NUM(bp
) == CHIP_NUM_5708
)) {
605 bnx2_read_phy(bp
, BCM5708S_1000X_STAT1
, &val
);
606 if (val
& BCM5708S_1000X_STAT1_TX_PAUSE
)
607 bp
->flow_ctrl
|= FLOW_CTRL_TX
;
608 if (val
& BCM5708S_1000X_STAT1_RX_PAUSE
)
609 bp
->flow_ctrl
|= FLOW_CTRL_RX
;
613 bnx2_read_phy(bp
, MII_ADVERTISE
, &local_adv
);
614 bnx2_read_phy(bp
, MII_LPA
, &remote_adv
);
616 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
617 u32 new_local_adv
= 0;
618 u32 new_remote_adv
= 0;
620 if (local_adv
& ADVERTISE_1000XPAUSE
)
621 new_local_adv
|= ADVERTISE_PAUSE_CAP
;
622 if (local_adv
& ADVERTISE_1000XPSE_ASYM
)
623 new_local_adv
|= ADVERTISE_PAUSE_ASYM
;
624 if (remote_adv
& ADVERTISE_1000XPAUSE
)
625 new_remote_adv
|= ADVERTISE_PAUSE_CAP
;
626 if (remote_adv
& ADVERTISE_1000XPSE_ASYM
)
627 new_remote_adv
|= ADVERTISE_PAUSE_ASYM
;
629 local_adv
= new_local_adv
;
630 remote_adv
= new_remote_adv
;
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv
& ADVERTISE_PAUSE_CAP
) {
635 if(local_adv
& ADVERTISE_PAUSE_ASYM
) {
636 if (remote_adv
& ADVERTISE_PAUSE_CAP
) {
637 bp
->flow_ctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
639 else if (remote_adv
& ADVERTISE_PAUSE_ASYM
) {
640 bp
->flow_ctrl
= FLOW_CTRL_RX
;
644 if (remote_adv
& ADVERTISE_PAUSE_CAP
) {
645 bp
->flow_ctrl
= FLOW_CTRL_TX
| FLOW_CTRL_RX
;
649 else if (local_adv
& ADVERTISE_PAUSE_ASYM
) {
650 if ((remote_adv
& ADVERTISE_PAUSE_CAP
) &&
651 (remote_adv
& ADVERTISE_PAUSE_ASYM
)) {
653 bp
->flow_ctrl
= FLOW_CTRL_TX
;
659 bnx2_5708s_linkup(struct bnx2
*bp
)
664 bnx2_read_phy(bp
, BCM5708S_1000X_STAT1
, &val
);
665 switch (val
& BCM5708S_1000X_STAT1_SPEED_MASK
) {
666 case BCM5708S_1000X_STAT1_SPEED_10
:
667 bp
->line_speed
= SPEED_10
;
669 case BCM5708S_1000X_STAT1_SPEED_100
:
670 bp
->line_speed
= SPEED_100
;
672 case BCM5708S_1000X_STAT1_SPEED_1G
:
673 bp
->line_speed
= SPEED_1000
;
675 case BCM5708S_1000X_STAT1_SPEED_2G5
:
676 bp
->line_speed
= SPEED_2500
;
679 if (val
& BCM5708S_1000X_STAT1_FD
)
680 bp
->duplex
= DUPLEX_FULL
;
682 bp
->duplex
= DUPLEX_HALF
;
688 bnx2_5706s_linkup(struct bnx2
*bp
)
690 u32 bmcr
, local_adv
, remote_adv
, common
;
693 bp
->line_speed
= SPEED_1000
;
695 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
696 if (bmcr
& BMCR_FULLDPLX
) {
697 bp
->duplex
= DUPLEX_FULL
;
700 bp
->duplex
= DUPLEX_HALF
;
703 if (!(bmcr
& BMCR_ANENABLE
)) {
707 bnx2_read_phy(bp
, MII_ADVERTISE
, &local_adv
);
708 bnx2_read_phy(bp
, MII_LPA
, &remote_adv
);
710 common
= local_adv
& remote_adv
;
711 if (common
& (ADVERTISE_1000XHALF
| ADVERTISE_1000XFULL
)) {
713 if (common
& ADVERTISE_1000XFULL
) {
714 bp
->duplex
= DUPLEX_FULL
;
717 bp
->duplex
= DUPLEX_HALF
;
725 bnx2_copper_linkup(struct bnx2
*bp
)
729 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
730 if (bmcr
& BMCR_ANENABLE
) {
731 u32 local_adv
, remote_adv
, common
;
733 bnx2_read_phy(bp
, MII_CTRL1000
, &local_adv
);
734 bnx2_read_phy(bp
, MII_STAT1000
, &remote_adv
);
736 common
= local_adv
& (remote_adv
>> 2);
737 if (common
& ADVERTISE_1000FULL
) {
738 bp
->line_speed
= SPEED_1000
;
739 bp
->duplex
= DUPLEX_FULL
;
741 else if (common
& ADVERTISE_1000HALF
) {
742 bp
->line_speed
= SPEED_1000
;
743 bp
->duplex
= DUPLEX_HALF
;
746 bnx2_read_phy(bp
, MII_ADVERTISE
, &local_adv
);
747 bnx2_read_phy(bp
, MII_LPA
, &remote_adv
);
749 common
= local_adv
& remote_adv
;
750 if (common
& ADVERTISE_100FULL
) {
751 bp
->line_speed
= SPEED_100
;
752 bp
->duplex
= DUPLEX_FULL
;
754 else if (common
& ADVERTISE_100HALF
) {
755 bp
->line_speed
= SPEED_100
;
756 bp
->duplex
= DUPLEX_HALF
;
758 else if (common
& ADVERTISE_10FULL
) {
759 bp
->line_speed
= SPEED_10
;
760 bp
->duplex
= DUPLEX_FULL
;
762 else if (common
& ADVERTISE_10HALF
) {
763 bp
->line_speed
= SPEED_10
;
764 bp
->duplex
= DUPLEX_HALF
;
773 if (bmcr
& BMCR_SPEED100
) {
774 bp
->line_speed
= SPEED_100
;
777 bp
->line_speed
= SPEED_10
;
779 if (bmcr
& BMCR_FULLDPLX
) {
780 bp
->duplex
= DUPLEX_FULL
;
783 bp
->duplex
= DUPLEX_HALF
;
791 bnx2_set_mac_link(struct bnx2
*bp
)
795 REG_WR(bp
, BNX2_EMAC_TX_LENGTHS
, 0x2620);
796 if (bp
->link_up
&& (bp
->line_speed
== SPEED_1000
) &&
797 (bp
->duplex
== DUPLEX_HALF
)) {
798 REG_WR(bp
, BNX2_EMAC_TX_LENGTHS
, 0x26ff);
801 /* Configure the EMAC mode register. */
802 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
804 val
&= ~(BNX2_EMAC_MODE_PORT
| BNX2_EMAC_MODE_HALF_DUPLEX
|
805 BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
|
809 switch (bp
->line_speed
) {
811 if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
812 val
|= BNX2_EMAC_MODE_PORT_MII_10
;
817 val
|= BNX2_EMAC_MODE_PORT_MII
;
820 val
|= BNX2_EMAC_MODE_25G
;
823 val
|= BNX2_EMAC_MODE_PORT_GMII
;
828 val
|= BNX2_EMAC_MODE_PORT_GMII
;
831 /* Set the MAC to operate in the appropriate duplex mode. */
832 if (bp
->duplex
== DUPLEX_HALF
)
833 val
|= BNX2_EMAC_MODE_HALF_DUPLEX
;
834 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
836 /* Enable/disable rx PAUSE. */
837 bp
->rx_mode
&= ~BNX2_EMAC_RX_MODE_FLOW_EN
;
839 if (bp
->flow_ctrl
& FLOW_CTRL_RX
)
840 bp
->rx_mode
|= BNX2_EMAC_RX_MODE_FLOW_EN
;
841 REG_WR(bp
, BNX2_EMAC_RX_MODE
, bp
->rx_mode
);
843 /* Enable/disable tx PAUSE. */
844 val
= REG_RD(bp
, BNX2_EMAC_TX_MODE
);
845 val
&= ~BNX2_EMAC_TX_MODE_FLOW_EN
;
847 if (bp
->flow_ctrl
& FLOW_CTRL_TX
)
848 val
|= BNX2_EMAC_TX_MODE_FLOW_EN
;
849 REG_WR(bp
, BNX2_EMAC_TX_MODE
, val
);
851 /* Acknowledge the interrupt. */
852 REG_WR(bp
, BNX2_EMAC_STATUS
, BNX2_EMAC_STATUS_LINK_CHANGE
);
858 bnx2_set_link(struct bnx2
*bp
)
863 if (bp
->loopback
== MAC_LOOPBACK
|| bp
->loopback
== PHY_LOOPBACK
) {
868 link_up
= bp
->link_up
;
870 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
871 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
873 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
874 (CHIP_NUM(bp
) == CHIP_NUM_5706
)) {
877 val
= REG_RD(bp
, BNX2_EMAC_STATUS
);
878 if (val
& BNX2_EMAC_STATUS_LINK
)
879 bmsr
|= BMSR_LSTATUS
;
881 bmsr
&= ~BMSR_LSTATUS
;
884 if (bmsr
& BMSR_LSTATUS
) {
887 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
888 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
889 bnx2_5706s_linkup(bp
);
890 else if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
891 bnx2_5708s_linkup(bp
);
894 bnx2_copper_linkup(bp
);
896 bnx2_resolve_flow_ctrl(bp
);
899 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
900 (bp
->autoneg
& AUTONEG_SPEED
)) {
904 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
905 bmcr
&= ~BCM5708S_BMCR_FORCE_2500
;
906 if (!(bmcr
& BMCR_ANENABLE
)) {
907 bnx2_write_phy(bp
, MII_BMCR
, bmcr
|
911 bp
->phy_flags
&= ~PHY_PARALLEL_DETECT_FLAG
;
915 if (bp
->link_up
!= link_up
) {
916 bnx2_report_link(bp
);
919 bnx2_set_mac_link(bp
);
925 bnx2_reset_phy(struct bnx2
*bp
)
930 bnx2_write_phy(bp
, MII_BMCR
, BMCR_RESET
);
932 #define PHY_RESET_MAX_WAIT 100
933 for (i
= 0; i
< PHY_RESET_MAX_WAIT
; i
++) {
936 bnx2_read_phy(bp
, MII_BMCR
, ®
);
937 if (!(reg
& BMCR_RESET
)) {
942 if (i
== PHY_RESET_MAX_WAIT
) {
949 bnx2_phy_get_pause_adv(struct bnx2
*bp
)
953 if ((bp
->req_flow_ctrl
& (FLOW_CTRL_RX
| FLOW_CTRL_TX
)) ==
954 (FLOW_CTRL_RX
| FLOW_CTRL_TX
)) {
956 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
957 adv
= ADVERTISE_1000XPAUSE
;
960 adv
= ADVERTISE_PAUSE_CAP
;
963 else if (bp
->req_flow_ctrl
& FLOW_CTRL_TX
) {
964 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
965 adv
= ADVERTISE_1000XPSE_ASYM
;
968 adv
= ADVERTISE_PAUSE_ASYM
;
971 else if (bp
->req_flow_ctrl
& FLOW_CTRL_RX
) {
972 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
973 adv
= ADVERTISE_1000XPAUSE
| ADVERTISE_1000XPSE_ASYM
;
976 adv
= ADVERTISE_PAUSE_CAP
| ADVERTISE_PAUSE_ASYM
;
983 bnx2_setup_serdes_phy(struct bnx2
*bp
)
988 if (!(bp
->autoneg
& AUTONEG_SPEED
)) {
990 int force_link_down
= 0;
992 bnx2_read_phy(bp
, MII_ADVERTISE
, &adv
);
993 adv
&= ~(ADVERTISE_1000XFULL
| ADVERTISE_1000XHALF
);
995 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
996 new_bmcr
= bmcr
& ~(BMCR_ANENABLE
| BCM5708S_BMCR_FORCE_2500
);
997 new_bmcr
|= BMCR_SPEED1000
;
998 if (bp
->req_line_speed
== SPEED_2500
) {
999 new_bmcr
|= BCM5708S_BMCR_FORCE_2500
;
1000 bnx2_read_phy(bp
, BCM5708S_UP1
, &up1
);
1001 if (!(up1
& BCM5708S_UP1_2G5
)) {
1002 up1
|= BCM5708S_UP1_2G5
;
1003 bnx2_write_phy(bp
, BCM5708S_UP1
, up1
);
1004 force_link_down
= 1;
1006 } else if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
1007 bnx2_read_phy(bp
, BCM5708S_UP1
, &up1
);
1008 if (up1
& BCM5708S_UP1_2G5
) {
1009 up1
&= ~BCM5708S_UP1_2G5
;
1010 bnx2_write_phy(bp
, BCM5708S_UP1
, up1
);
1011 force_link_down
= 1;
1015 if (bp
->req_duplex
== DUPLEX_FULL
) {
1016 adv
|= ADVERTISE_1000XFULL
;
1017 new_bmcr
|= BMCR_FULLDPLX
;
1020 adv
|= ADVERTISE_1000XHALF
;
1021 new_bmcr
&= ~BMCR_FULLDPLX
;
1023 if ((new_bmcr
!= bmcr
) || (force_link_down
)) {
1024 /* Force a link down visible on the other side */
1026 bnx2_write_phy(bp
, MII_ADVERTISE
, adv
&
1027 ~(ADVERTISE_1000XFULL
|
1028 ADVERTISE_1000XHALF
));
1029 bnx2_write_phy(bp
, MII_BMCR
, bmcr
|
1030 BMCR_ANRESTART
| BMCR_ANENABLE
);
1033 netif_carrier_off(bp
->dev
);
1034 bnx2_write_phy(bp
, MII_BMCR
, new_bmcr
);
1035 bnx2_report_link(bp
);
1037 bnx2_write_phy(bp
, MII_ADVERTISE
, adv
);
1038 bnx2_write_phy(bp
, MII_BMCR
, new_bmcr
);
1043 if (bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
) {
1044 bnx2_read_phy(bp
, BCM5708S_UP1
, &up1
);
1045 up1
|= BCM5708S_UP1_2G5
;
1046 bnx2_write_phy(bp
, BCM5708S_UP1
, up1
);
1049 if (bp
->advertising
& ADVERTISED_1000baseT_Full
)
1050 new_adv
|= ADVERTISE_1000XFULL
;
1052 new_adv
|= bnx2_phy_get_pause_adv(bp
);
1054 bnx2_read_phy(bp
, MII_ADVERTISE
, &adv
);
1055 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
1057 bp
->serdes_an_pending
= 0;
1058 if ((adv
!= new_adv
) || ((bmcr
& BMCR_ANENABLE
) == 0)) {
1059 /* Force a link down visible on the other side */
1061 bnx2_write_phy(bp
, MII_BMCR
, BMCR_LOOPBACK
);
1062 spin_unlock_bh(&bp
->phy_lock
);
1064 spin_lock_bh(&bp
->phy_lock
);
1067 bnx2_write_phy(bp
, MII_ADVERTISE
, new_adv
);
1068 bnx2_write_phy(bp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
|
1070 if (CHIP_NUM(bp
) == CHIP_NUM_5706
) {
1071 /* Speed up link-up time when the link partner
1072 * does not autonegotiate which is very common
1073 * in blade servers. Some blade servers use
1074 * IPMI for kerboard input and it's important
1075 * to minimize link disruptions. Autoneg. involves
1076 * exchanging base pages plus 3 next pages and
1077 * normally completes in about 120 msec.
1079 bp
->current_interval
= SERDES_AN_TIMEOUT
;
1080 bp
->serdes_an_pending
= 1;
1081 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
/* Advertisement masks used when validating ethtool speed settings. */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1102 bnx2_setup_copper_phy(struct bnx2
*bp
)
1107 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
1109 if (bp
->autoneg
& AUTONEG_SPEED
) {
1110 u32 adv_reg
, adv1000_reg
;
1111 u32 new_adv_reg
= 0;
1112 u32 new_adv1000_reg
= 0;
1114 bnx2_read_phy(bp
, MII_ADVERTISE
, &adv_reg
);
1115 adv_reg
&= (PHY_ALL_10_100_SPEED
| ADVERTISE_PAUSE_CAP
|
1116 ADVERTISE_PAUSE_ASYM
);
1118 bnx2_read_phy(bp
, MII_CTRL1000
, &adv1000_reg
);
1119 adv1000_reg
&= PHY_ALL_1000_SPEED
;
1121 if (bp
->advertising
& ADVERTISED_10baseT_Half
)
1122 new_adv_reg
|= ADVERTISE_10HALF
;
1123 if (bp
->advertising
& ADVERTISED_10baseT_Full
)
1124 new_adv_reg
|= ADVERTISE_10FULL
;
1125 if (bp
->advertising
& ADVERTISED_100baseT_Half
)
1126 new_adv_reg
|= ADVERTISE_100HALF
;
1127 if (bp
->advertising
& ADVERTISED_100baseT_Full
)
1128 new_adv_reg
|= ADVERTISE_100FULL
;
1129 if (bp
->advertising
& ADVERTISED_1000baseT_Full
)
1130 new_adv1000_reg
|= ADVERTISE_1000FULL
;
1132 new_adv_reg
|= ADVERTISE_CSMA
;
1134 new_adv_reg
|= bnx2_phy_get_pause_adv(bp
);
1136 if ((adv1000_reg
!= new_adv1000_reg
) ||
1137 (adv_reg
!= new_adv_reg
) ||
1138 ((bmcr
& BMCR_ANENABLE
) == 0)) {
1140 bnx2_write_phy(bp
, MII_ADVERTISE
, new_adv_reg
);
1141 bnx2_write_phy(bp
, MII_CTRL1000
, new_adv1000_reg
);
1142 bnx2_write_phy(bp
, MII_BMCR
, BMCR_ANRESTART
|
1145 else if (bp
->link_up
) {
1146 /* Flow ctrl may have changed from auto to forced */
1147 /* or vice-versa. */
1149 bnx2_resolve_flow_ctrl(bp
);
1150 bnx2_set_mac_link(bp
);
1156 if (bp
->req_line_speed
== SPEED_100
) {
1157 new_bmcr
|= BMCR_SPEED100
;
1159 if (bp
->req_duplex
== DUPLEX_FULL
) {
1160 new_bmcr
|= BMCR_FULLDPLX
;
1162 if (new_bmcr
!= bmcr
) {
1166 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
1167 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
1169 if (bmsr
& BMSR_LSTATUS
) {
1170 /* Force link down */
1171 bnx2_write_phy(bp
, MII_BMCR
, BMCR_LOOPBACK
);
1174 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
1175 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
1177 } while ((bmsr
& BMSR_LSTATUS
) && (i
< 620));
1180 bnx2_write_phy(bp
, MII_BMCR
, new_bmcr
);
1182 /* Normally, the new speed is setup after the link has
1183 * gone down and up again. In some cases, link will not go
1184 * down so we need to set up the new speed here.
1186 if (bmsr
& BMSR_LSTATUS
) {
1187 bp
->line_speed
= bp
->req_line_speed
;
1188 bp
->duplex
= bp
->req_duplex
;
1189 bnx2_resolve_flow_ctrl(bp
);
1190 bnx2_set_mac_link(bp
);
1197 bnx2_setup_phy(struct bnx2
*bp
)
1199 if (bp
->loopback
== MAC_LOOPBACK
)
1202 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1203 return (bnx2_setup_serdes_phy(bp
));
1206 return (bnx2_setup_copper_phy(bp
));
1211 bnx2_init_5708s_phy(struct bnx2
*bp
)
1215 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG3
);
1216 bnx2_write_phy(bp
, BCM5708S_DIG_3_0
, BCM5708S_DIG_3_0_USE_IEEE
);
1217 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG
);
1219 bnx2_read_phy(bp
, BCM5708S_1000X_CTL1
, &val
);
1220 val
|= BCM5708S_1000X_CTL1_FIBER_MODE
| BCM5708S_1000X_CTL1_AUTODET_EN
;
1221 bnx2_write_phy(bp
, BCM5708S_1000X_CTL1
, val
);
1223 bnx2_read_phy(bp
, BCM5708S_1000X_CTL2
, &val
);
1224 val
|= BCM5708S_1000X_CTL2_PLLEL_DET_EN
;
1225 bnx2_write_phy(bp
, BCM5708S_1000X_CTL2
, val
);
1227 if (bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
) {
1228 bnx2_read_phy(bp
, BCM5708S_UP1
, &val
);
1229 val
|= BCM5708S_UP1_2G5
;
1230 bnx2_write_phy(bp
, BCM5708S_UP1
, val
);
1233 if ((CHIP_ID(bp
) == CHIP_ID_5708_A0
) ||
1234 (CHIP_ID(bp
) == CHIP_ID_5708_B0
) ||
1235 (CHIP_ID(bp
) == CHIP_ID_5708_B1
)) {
1236 /* increase tx signal amplitude */
1237 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
1238 BCM5708S_BLK_ADDR_TX_MISC
);
1239 bnx2_read_phy(bp
, BCM5708S_TX_ACTL1
, &val
);
1240 val
&= ~BCM5708S_TX_ACTL1_DRIVER_VCM
;
1241 bnx2_write_phy(bp
, BCM5708S_TX_ACTL1
, val
);
1242 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
, BCM5708S_BLK_ADDR_DIG
);
1245 val
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_HW_CFG_CONFIG
) &
1246 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK
;
1251 is_backplane
= REG_RD_IND(bp
, bp
->shmem_base
+
1252 BNX2_SHARED_HW_CFG_CONFIG
);
1253 if (is_backplane
& BNX2_SHARED_HW_CFG_PHY_BACKPLANE
) {
1254 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
1255 BCM5708S_BLK_ADDR_TX_MISC
);
1256 bnx2_write_phy(bp
, BCM5708S_TX_ACTL3
, val
);
1257 bnx2_write_phy(bp
, BCM5708S_BLK_ADDR
,
1258 BCM5708S_BLK_ADDR_DIG
);
1265 bnx2_init_5706s_phy(struct bnx2
*bp
)
1267 bp
->phy_flags
&= ~PHY_PARALLEL_DETECT_FLAG
;
1269 if (CHIP_NUM(bp
) == CHIP_NUM_5706
) {
1270 REG_WR(bp
, BNX2_MISC_UNUSED0
, 0x300);
1273 if (bp
->dev
->mtu
> 1500) {
1276 /* Set extended packet length bit */
1277 bnx2_write_phy(bp
, 0x18, 0x7);
1278 bnx2_read_phy(bp
, 0x18, &val
);
1279 bnx2_write_phy(bp
, 0x18, (val
& 0xfff8) | 0x4000);
1281 bnx2_write_phy(bp
, 0x1c, 0x6c00);
1282 bnx2_read_phy(bp
, 0x1c, &val
);
1283 bnx2_write_phy(bp
, 0x1c, (val
& 0x3ff) | 0xec02);
1288 bnx2_write_phy(bp
, 0x18, 0x7);
1289 bnx2_read_phy(bp
, 0x18, &val
);
1290 bnx2_write_phy(bp
, 0x18, val
& ~0x4007);
1292 bnx2_write_phy(bp
, 0x1c, 0x6c00);
1293 bnx2_read_phy(bp
, 0x1c, &val
);
1294 bnx2_write_phy(bp
, 0x1c, (val
& 0x3fd) | 0xec00);
1301 bnx2_init_copper_phy(struct bnx2
*bp
)
1305 bp
->phy_flags
|= PHY_CRC_FIX_FLAG
;
1307 if (bp
->phy_flags
& PHY_CRC_FIX_FLAG
) {
1308 bnx2_write_phy(bp
, 0x18, 0x0c00);
1309 bnx2_write_phy(bp
, 0x17, 0x000a);
1310 bnx2_write_phy(bp
, 0x15, 0x310b);
1311 bnx2_write_phy(bp
, 0x17, 0x201f);
1312 bnx2_write_phy(bp
, 0x15, 0x9506);
1313 bnx2_write_phy(bp
, 0x17, 0x401f);
1314 bnx2_write_phy(bp
, 0x15, 0x14e2);
1315 bnx2_write_phy(bp
, 0x18, 0x0400);
1318 if (bp
->dev
->mtu
> 1500) {
1319 /* Set extended packet length bit */
1320 bnx2_write_phy(bp
, 0x18, 0x7);
1321 bnx2_read_phy(bp
, 0x18, &val
);
1322 bnx2_write_phy(bp
, 0x18, val
| 0x4000);
1324 bnx2_read_phy(bp
, 0x10, &val
);
1325 bnx2_write_phy(bp
, 0x10, val
| 0x1);
1328 bnx2_write_phy(bp
, 0x18, 0x7);
1329 bnx2_read_phy(bp
, 0x18, &val
);
1330 bnx2_write_phy(bp
, 0x18, val
& ~0x4007);
1332 bnx2_read_phy(bp
, 0x10, &val
);
1333 bnx2_write_phy(bp
, 0x10, val
& ~0x1);
1336 /* ethernet@wirespeed */
1337 bnx2_write_phy(bp
, 0x18, 0x7007);
1338 bnx2_read_phy(bp
, 0x18, &val
);
1339 bnx2_write_phy(bp
, 0x18, val
| (1 << 15) | (1 << 4));
1345 bnx2_init_phy(struct bnx2
*bp
)
1350 bp
->phy_flags
&= ~PHY_INT_MODE_MASK_FLAG
;
1351 bp
->phy_flags
|= PHY_INT_MODE_LINK_READY_FLAG
;
1353 REG_WR(bp
, BNX2_EMAC_ATTENTION_ENA
, BNX2_EMAC_ATTENTION_ENA_LINK
);
1357 bnx2_read_phy(bp
, MII_PHYSID1
, &val
);
1358 bp
->phy_id
= val
<< 16;
1359 bnx2_read_phy(bp
, MII_PHYSID2
, &val
);
1360 bp
->phy_id
|= val
& 0xffff;
1362 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
1363 if (CHIP_NUM(bp
) == CHIP_NUM_5706
)
1364 rc
= bnx2_init_5706s_phy(bp
);
1365 else if (CHIP_NUM(bp
) == CHIP_NUM_5708
)
1366 rc
= bnx2_init_5708s_phy(bp
);
1369 rc
= bnx2_init_copper_phy(bp
);
1378 bnx2_set_mac_loopback(struct bnx2
*bp
)
1382 mac_mode
= REG_RD(bp
, BNX2_EMAC_MODE
);
1383 mac_mode
&= ~BNX2_EMAC_MODE_PORT
;
1384 mac_mode
|= BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
;
1385 REG_WR(bp
, BNX2_EMAC_MODE
, mac_mode
);
1390 static int bnx2_test_link(struct bnx2
*);
1393 bnx2_set_phy_loopback(struct bnx2
*bp
)
1398 spin_lock_bh(&bp
->phy_lock
);
1399 rc
= bnx2_write_phy(bp
, MII_BMCR
, BMCR_LOOPBACK
| BMCR_FULLDPLX
|
1401 spin_unlock_bh(&bp
->phy_lock
);
1405 for (i
= 0; i
< 10; i
++) {
1406 if (bnx2_test_link(bp
) == 0)
1411 mac_mode
= REG_RD(bp
, BNX2_EMAC_MODE
);
1412 mac_mode
&= ~(BNX2_EMAC_MODE_PORT
| BNX2_EMAC_MODE_HALF_DUPLEX
|
1413 BNX2_EMAC_MODE_MAC_LOOP
| BNX2_EMAC_MODE_FORCE_LINK
|
1414 BNX2_EMAC_MODE_25G
);
1416 mac_mode
|= BNX2_EMAC_MODE_PORT_GMII
;
1417 REG_WR(bp
, BNX2_EMAC_MODE
, mac_mode
);
1423 bnx2_fw_sync(struct bnx2
*bp
, u32 msg_data
, int silent
)
1429 msg_data
|= bp
->fw_wr_seq
;
1431 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_DRV_MB
, msg_data
);
1433 /* wait for an acknowledgement. */
1434 for (i
= 0; i
< (FW_ACK_TIME_OUT_MS
/ 10); i
++) {
1437 val
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_FW_MB
);
1439 if ((val
& BNX2_FW_MSG_ACK
) == (msg_data
& BNX2_DRV_MSG_SEQ
))
1442 if ((msg_data
& BNX2_DRV_MSG_DATA
) == BNX2_DRV_MSG_DATA_WAIT0
)
1445 /* If we timed out, inform the firmware that this is the case. */
1446 if ((val
& BNX2_FW_MSG_ACK
) != (msg_data
& BNX2_DRV_MSG_SEQ
)) {
1448 printk(KERN_ERR PFX
"fw sync timeout, reset code = "
1451 msg_data
&= ~BNX2_DRV_MSG_CODE
;
1452 msg_data
|= BNX2_DRV_MSG_CODE_FW_TIMEOUT
;
1454 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_DRV_MB
, msg_data
);
1459 if ((val
& BNX2_FW_MSG_STATUS_MASK
) != BNX2_FW_MSG_STATUS_OK
)
1466 bnx2_init_context(struct bnx2
*bp
)
1472 u32 vcid_addr
, pcid_addr
, offset
;
1476 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
1479 vcid_addr
= GET_PCID_ADDR(vcid
);
1481 new_vcid
= 0x60 + (vcid
& 0xf0) + (vcid
& 0x7);
1486 pcid_addr
= GET_PCID_ADDR(new_vcid
);
1489 vcid_addr
= GET_CID_ADDR(vcid
);
1490 pcid_addr
= vcid_addr
;
1493 REG_WR(bp
, BNX2_CTX_VIRT_ADDR
, 0x00);
1494 REG_WR(bp
, BNX2_CTX_PAGE_TBL
, pcid_addr
);
1496 /* Zero out the context. */
1497 for (offset
= 0; offset
< PHY_CTX_SIZE
; offset
+= 4) {
1498 CTX_WR(bp
, 0x00, offset
, 0);
1501 REG_WR(bp
, BNX2_CTX_VIRT_ADDR
, vcid_addr
);
1502 REG_WR(bp
, BNX2_CTX_PAGE_TBL
, pcid_addr
);
1507 bnx2_alloc_bad_rbuf(struct bnx2
*bp
)
1513 good_mbuf
= kmalloc(512 * sizeof(u16
), GFP_KERNEL
);
1514 if (good_mbuf
== NULL
) {
1515 printk(KERN_ERR PFX
"Failed to allocate memory in "
1516 "bnx2_alloc_bad_rbuf\n");
1520 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
1521 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE
);
1525 /* Allocate a bunch of mbufs and save the good ones in an array. */
1526 val
= REG_RD_IND(bp
, BNX2_RBUF_STATUS1
);
1527 while (val
& BNX2_RBUF_STATUS1_FREE_COUNT
) {
1528 REG_WR_IND(bp
, BNX2_RBUF_COMMAND
, BNX2_RBUF_COMMAND_ALLOC_REQ
);
1530 val
= REG_RD_IND(bp
, BNX2_RBUF_FW_BUF_ALLOC
);
1532 val
&= BNX2_RBUF_FW_BUF_ALLOC_VALUE
;
1534 /* The addresses with Bit 9 set are bad memory blocks. */
1535 if (!(val
& (1 << 9))) {
1536 good_mbuf
[good_mbuf_cnt
] = (u16
) val
;
1540 val
= REG_RD_IND(bp
, BNX2_RBUF_STATUS1
);
1543 /* Free the good ones back to the mbuf pool thus discarding
1544 * all the bad ones. */
1545 while (good_mbuf_cnt
) {
1548 val
= good_mbuf
[good_mbuf_cnt
];
1549 val
= (val
<< 9) | val
| 1;
1551 REG_WR_IND(bp
, BNX2_RBUF_FW_BUF_FREE
, val
);
1558 bnx2_set_mac_addr(struct bnx2
*bp
)
1561 u8
*mac_addr
= bp
->dev
->dev_addr
;
1563 val
= (mac_addr
[0] << 8) | mac_addr
[1];
1565 REG_WR(bp
, BNX2_EMAC_MAC_MATCH0
, val
);
1567 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
1568 (mac_addr
[4] << 8) | mac_addr
[5];
1570 REG_WR(bp
, BNX2_EMAC_MAC_MATCH1
, val
);
1574 bnx2_alloc_rx_skb(struct bnx2
*bp
, u16 index
)
1576 struct sk_buff
*skb
;
1577 struct sw_bd
*rx_buf
= &bp
->rx_buf_ring
[index
];
1579 struct rx_bd
*rxbd
= &bp
->rx_desc_ring
[RX_RING(index
)][RX_IDX(index
)];
1580 unsigned long align
;
1582 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
1587 if (unlikely((align
= (unsigned long) skb
->data
& 0x7))) {
1588 skb_reserve(skb
, 8 - align
);
1591 mapping
= pci_map_single(bp
->pdev
, skb
->data
, bp
->rx_buf_use_size
,
1592 PCI_DMA_FROMDEVICE
);
1595 pci_unmap_addr_set(rx_buf
, mapping
, mapping
);
1597 rxbd
->rx_bd_haddr_hi
= (u64
) mapping
>> 32;
1598 rxbd
->rx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
1600 bp
->rx_prod_bseq
+= bp
->rx_buf_use_size
;
1606 bnx2_phy_int(struct bnx2
*bp
)
1608 u32 new_link_state
, old_link_state
;
1610 new_link_state
= bp
->status_blk
->status_attn_bits
&
1611 STATUS_ATTN_BITS_LINK_STATE
;
1612 old_link_state
= bp
->status_blk
->status_attn_bits_ack
&
1613 STATUS_ATTN_BITS_LINK_STATE
;
1614 if (new_link_state
!= old_link_state
) {
1615 if (new_link_state
) {
1616 REG_WR(bp
, BNX2_PCICFG_STATUS_BIT_SET_CMD
,
1617 STATUS_ATTN_BITS_LINK_STATE
);
1620 REG_WR(bp
, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD
,
1621 STATUS_ATTN_BITS_LINK_STATE
);
1628 bnx2_tx_int(struct bnx2
*bp
)
1630 struct status_block
*sblk
= bp
->status_blk
;
1631 u16 hw_cons
, sw_cons
, sw_ring_cons
;
1634 hw_cons
= bp
->hw_tx_cons
= sblk
->status_tx_quick_consumer_index0
;
1635 if ((hw_cons
& MAX_TX_DESC_CNT
) == MAX_TX_DESC_CNT
) {
1638 sw_cons
= bp
->tx_cons
;
1640 while (sw_cons
!= hw_cons
) {
1641 struct sw_bd
*tx_buf
;
1642 struct sk_buff
*skb
;
1645 sw_ring_cons
= TX_RING_IDX(sw_cons
);
1647 tx_buf
= &bp
->tx_buf_ring
[sw_ring_cons
];
1650 /* partial BD completions possible with TSO packets */
1651 if (skb_is_gso(skb
)) {
1652 u16 last_idx
, last_ring_idx
;
1654 last_idx
= sw_cons
+
1655 skb_shinfo(skb
)->nr_frags
+ 1;
1656 last_ring_idx
= sw_ring_cons
+
1657 skb_shinfo(skb
)->nr_frags
+ 1;
1658 if (unlikely(last_ring_idx
>= MAX_TX_DESC_CNT
)) {
1661 if (((s16
) ((s16
) last_idx
- (s16
) hw_cons
)) > 0) {
1666 pci_unmap_single(bp
->pdev
, pci_unmap_addr(tx_buf
, mapping
),
1667 skb_headlen(skb
), PCI_DMA_TODEVICE
);
1670 last
= skb_shinfo(skb
)->nr_frags
;
1672 for (i
= 0; i
< last
; i
++) {
1673 sw_cons
= NEXT_TX_BD(sw_cons
);
1675 pci_unmap_page(bp
->pdev
,
1677 &bp
->tx_buf_ring
[TX_RING_IDX(sw_cons
)],
1679 skb_shinfo(skb
)->frags
[i
].size
,
1683 sw_cons
= NEXT_TX_BD(sw_cons
);
1685 tx_free_bd
+= last
+ 1;
1689 hw_cons
= bp
->hw_tx_cons
=
1690 sblk
->status_tx_quick_consumer_index0
;
1692 if ((hw_cons
& MAX_TX_DESC_CNT
) == MAX_TX_DESC_CNT
) {
1697 bp
->tx_cons
= sw_cons
;
1698 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1699 * before checking for netif_queue_stopped(). Without the
1700 * memory barrier, there is a small possibility that bnx2_start_xmit()
1701 * will miss it and cause the queue to be stopped forever.
1705 if (unlikely(netif_queue_stopped(bp
->dev
)) &&
1706 (bnx2_tx_avail(bp
) > bp
->tx_wake_thresh
)) {
1707 netif_tx_lock(bp
->dev
);
1708 if ((netif_queue_stopped(bp
->dev
)) &&
1709 (bnx2_tx_avail(bp
) > bp
->tx_wake_thresh
))
1710 netif_wake_queue(bp
->dev
);
1711 netif_tx_unlock(bp
->dev
);
1716 bnx2_reuse_rx_skb(struct bnx2
*bp
, struct sk_buff
*skb
,
1719 struct sw_bd
*cons_rx_buf
, *prod_rx_buf
;
1720 struct rx_bd
*cons_bd
, *prod_bd
;
1722 cons_rx_buf
= &bp
->rx_buf_ring
[cons
];
1723 prod_rx_buf
= &bp
->rx_buf_ring
[prod
];
1725 pci_dma_sync_single_for_device(bp
->pdev
,
1726 pci_unmap_addr(cons_rx_buf
, mapping
),
1727 bp
->rx_offset
+ RX_COPY_THRESH
, PCI_DMA_FROMDEVICE
);
1729 bp
->rx_prod_bseq
+= bp
->rx_buf_use_size
;
1731 prod_rx_buf
->skb
= skb
;
1736 pci_unmap_addr_set(prod_rx_buf
, mapping
,
1737 pci_unmap_addr(cons_rx_buf
, mapping
));
1739 cons_bd
= &bp
->rx_desc_ring
[RX_RING(cons
)][RX_IDX(cons
)];
1740 prod_bd
= &bp
->rx_desc_ring
[RX_RING(prod
)][RX_IDX(prod
)];
1741 prod_bd
->rx_bd_haddr_hi
= cons_bd
->rx_bd_haddr_hi
;
1742 prod_bd
->rx_bd_haddr_lo
= cons_bd
->rx_bd_haddr_lo
;
1746 bnx2_rx_int(struct bnx2
*bp
, int budget
)
1748 struct status_block
*sblk
= bp
->status_blk
;
1749 u16 hw_cons
, sw_cons
, sw_ring_cons
, sw_prod
, sw_ring_prod
;
1750 struct l2_fhdr
*rx_hdr
;
1753 hw_cons
= bp
->hw_rx_cons
= sblk
->status_rx_quick_consumer_index0
;
1754 if ((hw_cons
& MAX_RX_DESC_CNT
) == MAX_RX_DESC_CNT
) {
1757 sw_cons
= bp
->rx_cons
;
1758 sw_prod
= bp
->rx_prod
;
1760 /* Memory barrier necessary as speculative reads of the rx
1761 * buffer can be ahead of the index in the status block
1764 while (sw_cons
!= hw_cons
) {
1767 struct sw_bd
*rx_buf
;
1768 struct sk_buff
*skb
;
1769 dma_addr_t dma_addr
;
1771 sw_ring_cons
= RX_RING_IDX(sw_cons
);
1772 sw_ring_prod
= RX_RING_IDX(sw_prod
);
1774 rx_buf
= &bp
->rx_buf_ring
[sw_ring_cons
];
1779 dma_addr
= pci_unmap_addr(rx_buf
, mapping
);
1781 pci_dma_sync_single_for_cpu(bp
->pdev
, dma_addr
,
1782 bp
->rx_offset
+ RX_COPY_THRESH
, PCI_DMA_FROMDEVICE
);
1784 rx_hdr
= (struct l2_fhdr
*) skb
->data
;
1785 len
= rx_hdr
->l2_fhdr_pkt_len
- 4;
1787 if ((status
= rx_hdr
->l2_fhdr_status
) &
1788 (L2_FHDR_ERRORS_BAD_CRC
|
1789 L2_FHDR_ERRORS_PHY_DECODE
|
1790 L2_FHDR_ERRORS_ALIGNMENT
|
1791 L2_FHDR_ERRORS_TOO_SHORT
|
1792 L2_FHDR_ERRORS_GIANT_FRAME
)) {
1797 /* Since we don't have a jumbo ring, copy small packets
1800 if ((bp
->dev
->mtu
> 1500) && (len
<= RX_COPY_THRESH
)) {
1801 struct sk_buff
*new_skb
;
1803 new_skb
= netdev_alloc_skb(bp
->dev
, len
+ 2);
1804 if (new_skb
== NULL
)
1808 memcpy(new_skb
->data
,
1809 skb
->data
+ bp
->rx_offset
- 2,
1812 skb_reserve(new_skb
, 2);
1813 skb_put(new_skb
, len
);
1815 bnx2_reuse_rx_skb(bp
, skb
,
1816 sw_ring_cons
, sw_ring_prod
);
1820 else if (bnx2_alloc_rx_skb(bp
, sw_ring_prod
) == 0) {
1821 pci_unmap_single(bp
->pdev
, dma_addr
,
1822 bp
->rx_buf_use_size
, PCI_DMA_FROMDEVICE
);
1824 skb_reserve(skb
, bp
->rx_offset
);
1829 bnx2_reuse_rx_skb(bp
, skb
,
1830 sw_ring_cons
, sw_ring_prod
);
1834 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
1836 if ((len
> (bp
->dev
->mtu
+ ETH_HLEN
)) &&
1837 (ntohs(skb
->protocol
) != 0x8100)) {
1844 skb
->ip_summed
= CHECKSUM_NONE
;
1846 (status
& (L2_FHDR_STATUS_TCP_SEGMENT
|
1847 L2_FHDR_STATUS_UDP_DATAGRAM
))) {
1849 if (likely((status
& (L2_FHDR_ERRORS_TCP_XSUM
|
1850 L2_FHDR_ERRORS_UDP_XSUM
)) == 0))
1851 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1855 if ((status
& L2_FHDR_STATUS_L2_VLAN_TAG
) && (bp
->vlgrp
!= 0)) {
1856 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
,
1857 rx_hdr
->l2_fhdr_vlan_tag
);
1861 netif_receive_skb(skb
);
1863 bp
->dev
->last_rx
= jiffies
;
1867 sw_cons
= NEXT_RX_BD(sw_cons
);
1868 sw_prod
= NEXT_RX_BD(sw_prod
);
1870 if ((rx_pkt
== budget
))
1873 /* Refresh hw_cons to see if there is new work */
1874 if (sw_cons
== hw_cons
) {
1875 hw_cons
= bp
->hw_rx_cons
=
1876 sblk
->status_rx_quick_consumer_index0
;
1877 if ((hw_cons
& MAX_RX_DESC_CNT
) == MAX_RX_DESC_CNT
)
1882 bp
->rx_cons
= sw_cons
;
1883 bp
->rx_prod
= sw_prod
;
1885 REG_WR16(bp
, MB_RX_CID_ADDR
+ BNX2_L2CTX_HOST_BDIDX
, sw_prod
);
1887 REG_WR(bp
, MB_RX_CID_ADDR
+ BNX2_L2CTX_HOST_BSEQ
, bp
->rx_prod_bseq
);
1895 /* MSI ISR - The only difference between this and the INTx ISR
1896 * is that the MSI interrupt is always serviced.
1899 bnx2_msi(int irq
, void *dev_instance
)
1901 struct net_device
*dev
= dev_instance
;
1902 struct bnx2
*bp
= netdev_priv(dev
);
1904 prefetch(bp
->status_blk
);
1905 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
1906 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM
|
1907 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
1909 /* Return here if interrupt is disabled. */
1910 if (unlikely(atomic_read(&bp
->intr_sem
) != 0))
1913 netif_rx_schedule(dev
);
1919 bnx2_interrupt(int irq
, void *dev_instance
)
1921 struct net_device
*dev
= dev_instance
;
1922 struct bnx2
*bp
= netdev_priv(dev
);
1924 /* When using INTx, it is possible for the interrupt to arrive
1925 * at the CPU before the status block posted prior to the
1926 * interrupt. Reading a register will flush the status block.
1927 * When using MSI, the MSI message will always complete after
1928 * the status block write.
1930 if ((bp
->status_blk
->status_idx
== bp
->last_status_idx
) &&
1931 (REG_RD(bp
, BNX2_PCICFG_MISC_STATUS
) &
1932 BNX2_PCICFG_MISC_STATUS_INTA_VALUE
))
1935 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
1936 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM
|
1937 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
1939 /* Return here if interrupt is shared and is disabled. */
1940 if (unlikely(atomic_read(&bp
->intr_sem
) != 0))
1943 netif_rx_schedule(dev
);
1949 bnx2_has_work(struct bnx2
*bp
)
1951 struct status_block
*sblk
= bp
->status_blk
;
1953 if ((sblk
->status_rx_quick_consumer_index0
!= bp
->hw_rx_cons
) ||
1954 (sblk
->status_tx_quick_consumer_index0
!= bp
->hw_tx_cons
))
1957 if (((sblk
->status_attn_bits
& STATUS_ATTN_BITS_LINK_STATE
) != 0) !=
1965 bnx2_poll(struct net_device
*dev
, int *budget
)
1967 struct bnx2
*bp
= netdev_priv(dev
);
1969 if ((bp
->status_blk
->status_attn_bits
&
1970 STATUS_ATTN_BITS_LINK_STATE
) !=
1971 (bp
->status_blk
->status_attn_bits_ack
&
1972 STATUS_ATTN_BITS_LINK_STATE
)) {
1974 spin_lock(&bp
->phy_lock
);
1976 spin_unlock(&bp
->phy_lock
);
1978 /* This is needed to take care of transient status
1979 * during link changes.
1981 REG_WR(bp
, BNX2_HC_COMMAND
,
1982 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
1983 REG_RD(bp
, BNX2_HC_COMMAND
);
1986 if (bp
->status_blk
->status_tx_quick_consumer_index0
!= bp
->hw_tx_cons
)
1989 if (bp
->status_blk
->status_rx_quick_consumer_index0
!= bp
->hw_rx_cons
) {
1990 int orig_budget
= *budget
;
1993 if (orig_budget
> dev
->quota
)
1994 orig_budget
= dev
->quota
;
1996 work_done
= bnx2_rx_int(bp
, orig_budget
);
1997 *budget
-= work_done
;
1998 dev
->quota
-= work_done
;
2001 bp
->last_status_idx
= bp
->status_blk
->status_idx
;
2004 if (!bnx2_has_work(bp
)) {
2005 netif_rx_complete(dev
);
2006 if (likely(bp
->flags
& USING_MSI_FLAG
)) {
2007 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
2008 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
2009 bp
->last_status_idx
);
2012 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
2013 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
2014 BNX2_PCICFG_INT_ACK_CMD_MASK_INT
|
2015 bp
->last_status_idx
);
2017 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
,
2018 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID
|
2019 bp
->last_status_idx
);
2026 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2027 * from set_multicast.
2030 bnx2_set_rx_mode(struct net_device
*dev
)
2032 struct bnx2
*bp
= netdev_priv(dev
);
2033 u32 rx_mode
, sort_mode
;
2036 spin_lock_bh(&bp
->phy_lock
);
2038 rx_mode
= bp
->rx_mode
& ~(BNX2_EMAC_RX_MODE_PROMISCUOUS
|
2039 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
);
2040 sort_mode
= 1 | BNX2_RPM_SORT_USER0_BC_EN
;
2042 if (!bp
->vlgrp
&& !(bp
->flags
& ASF_ENABLE_FLAG
))
2043 rx_mode
|= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
;
2045 if (!(bp
->flags
& ASF_ENABLE_FLAG
))
2046 rx_mode
|= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG
;
2048 if (dev
->flags
& IFF_PROMISC
) {
2049 /* Promiscuous mode. */
2050 rx_mode
|= BNX2_EMAC_RX_MODE_PROMISCUOUS
;
2051 sort_mode
|= BNX2_RPM_SORT_USER0_PROM_EN
|
2052 BNX2_RPM_SORT_USER0_PROM_VLAN
;
2054 else if (dev
->flags
& IFF_ALLMULTI
) {
2055 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
2056 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
2059 sort_mode
|= BNX2_RPM_SORT_USER0_MC_EN
;
2062 /* Accept one or more multicast(s). */
2063 struct dev_mc_list
*mclist
;
2064 u32 mc_filter
[NUM_MC_HASH_REGISTERS
];
2069 memset(mc_filter
, 0, 4 * NUM_MC_HASH_REGISTERS
);
2071 for (i
= 0, mclist
= dev
->mc_list
; mclist
&& i
< dev
->mc_count
;
2072 i
++, mclist
= mclist
->next
) {
2074 crc
= ether_crc_le(ETH_ALEN
, mclist
->dmi_addr
);
2076 regidx
= (bit
& 0xe0) >> 5;
2078 mc_filter
[regidx
] |= (1 << bit
);
2081 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
2082 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
2086 sort_mode
|= BNX2_RPM_SORT_USER0_MC_HSH_EN
;
2089 if (rx_mode
!= bp
->rx_mode
) {
2090 bp
->rx_mode
= rx_mode
;
2091 REG_WR(bp
, BNX2_EMAC_RX_MODE
, rx_mode
);
2094 REG_WR(bp
, BNX2_RPM_SORT_USER0
, 0x0);
2095 REG_WR(bp
, BNX2_RPM_SORT_USER0
, sort_mode
);
2096 REG_WR(bp
, BNX2_RPM_SORT_USER0
, sort_mode
| BNX2_RPM_SORT_USER0_ENA
);
2098 spin_unlock_bh(&bp
->phy_lock
);
2101 #define FW_BUF_SIZE 0x8000
2104 bnx2_gunzip_init(struct bnx2
*bp
)
2106 if ((bp
->gunzip_buf
= vmalloc(FW_BUF_SIZE
)) == NULL
)
2109 if ((bp
->strm
= kmalloc(sizeof(*bp
->strm
), GFP_KERNEL
)) == NULL
)
2112 bp
->strm
->workspace
= kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL
);
2113 if (bp
->strm
->workspace
== NULL
)
2123 vfree(bp
->gunzip_buf
);
2124 bp
->gunzip_buf
= NULL
;
2127 printk(KERN_ERR PFX
"%s: Cannot allocate firmware buffer for "
2128 "uncompression.\n", bp
->dev
->name
);
2133 bnx2_gunzip_end(struct bnx2
*bp
)
2135 kfree(bp
->strm
->workspace
);
2140 if (bp
->gunzip_buf
) {
2141 vfree(bp
->gunzip_buf
);
2142 bp
->gunzip_buf
= NULL
;
2147 bnx2_gunzip(struct bnx2
*bp
, u8
*zbuf
, int len
, void **outbuf
, int *outlen
)
2151 /* check gzip header */
2152 if ((zbuf
[0] != 0x1f) || (zbuf
[1] != 0x8b) || (zbuf
[2] != Z_DEFLATED
))
2158 if (zbuf
[3] & FNAME
)
2159 while ((zbuf
[n
++] != 0) && (n
< len
));
2161 bp
->strm
->next_in
= zbuf
+ n
;
2162 bp
->strm
->avail_in
= len
- n
;
2163 bp
->strm
->next_out
= bp
->gunzip_buf
;
2164 bp
->strm
->avail_out
= FW_BUF_SIZE
;
2166 rc
= zlib_inflateInit2(bp
->strm
, -MAX_WBITS
);
2170 rc
= zlib_inflate(bp
->strm
, Z_FINISH
);
2172 *outlen
= FW_BUF_SIZE
- bp
->strm
->avail_out
;
2173 *outbuf
= bp
->gunzip_buf
;
2175 if ((rc
!= Z_OK
) && (rc
!= Z_STREAM_END
))
2176 printk(KERN_ERR PFX
"%s: Firmware decompression error: %s\n",
2177 bp
->dev
->name
, bp
->strm
->msg
);
2179 zlib_inflateEnd(bp
->strm
);
2181 if (rc
== Z_STREAM_END
)
2188 load_rv2p_fw(struct bnx2
*bp
, u32
*rv2p_code
, u32 rv2p_code_len
,
2195 for (i
= 0; i
< rv2p_code_len
; i
+= 8) {
2196 REG_WR(bp
, BNX2_RV2P_INSTR_HIGH
, cpu_to_le32(*rv2p_code
));
2198 REG_WR(bp
, BNX2_RV2P_INSTR_LOW
, cpu_to_le32(*rv2p_code
));
2201 if (rv2p_proc
== RV2P_PROC1
) {
2202 val
= (i
/ 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR
;
2203 REG_WR(bp
, BNX2_RV2P_PROC1_ADDR_CMD
, val
);
2206 val
= (i
/ 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR
;
2207 REG_WR(bp
, BNX2_RV2P_PROC2_ADDR_CMD
, val
);
2211 /* Reset the processor, un-stall is done later. */
2212 if (rv2p_proc
== RV2P_PROC1
) {
2213 REG_WR(bp
, BNX2_RV2P_COMMAND
, BNX2_RV2P_COMMAND_PROC1_RESET
);
2216 REG_WR(bp
, BNX2_RV2P_COMMAND
, BNX2_RV2P_COMMAND_PROC2_RESET
);
2221 load_cpu_fw(struct bnx2
*bp
, struct cpu_reg
*cpu_reg
, struct fw_info
*fw
)
2227 val
= REG_RD_IND(bp
, cpu_reg
->mode
);
2228 val
|= cpu_reg
->mode_value_halt
;
2229 REG_WR_IND(bp
, cpu_reg
->mode
, val
);
2230 REG_WR_IND(bp
, cpu_reg
->state
, cpu_reg
->state_value_clear
);
2232 /* Load the Text area. */
2233 offset
= cpu_reg
->spad_base
+ (fw
->text_addr
- cpu_reg
->mips_view_base
);
2237 for (j
= 0; j
< (fw
->text_len
/ 4); j
++, offset
+= 4) {
2238 REG_WR_IND(bp
, offset
, cpu_to_le32(fw
->text
[j
]));
2242 /* Load the Data area. */
2243 offset
= cpu_reg
->spad_base
+ (fw
->data_addr
- cpu_reg
->mips_view_base
);
2247 for (j
= 0; j
< (fw
->data_len
/ 4); j
++, offset
+= 4) {
2248 REG_WR_IND(bp
, offset
, fw
->data
[j
]);
2252 /* Load the SBSS area. */
2253 offset
= cpu_reg
->spad_base
+ (fw
->sbss_addr
- cpu_reg
->mips_view_base
);
2257 for (j
= 0; j
< (fw
->sbss_len
/ 4); j
++, offset
+= 4) {
2258 REG_WR_IND(bp
, offset
, fw
->sbss
[j
]);
2262 /* Load the BSS area. */
2263 offset
= cpu_reg
->spad_base
+ (fw
->bss_addr
- cpu_reg
->mips_view_base
);
2267 for (j
= 0; j
< (fw
->bss_len
/4); j
++, offset
+= 4) {
2268 REG_WR_IND(bp
, offset
, fw
->bss
[j
]);
2272 /* Load the Read-Only area. */
2273 offset
= cpu_reg
->spad_base
+
2274 (fw
->rodata_addr
- cpu_reg
->mips_view_base
);
2278 for (j
= 0; j
< (fw
->rodata_len
/ 4); j
++, offset
+= 4) {
2279 REG_WR_IND(bp
, offset
, fw
->rodata
[j
]);
2283 /* Clear the pre-fetch instruction. */
2284 REG_WR_IND(bp
, cpu_reg
->inst
, 0);
2285 REG_WR_IND(bp
, cpu_reg
->pc
, fw
->start_addr
);
2287 /* Start the CPU. */
2288 val
= REG_RD_IND(bp
, cpu_reg
->mode
);
2289 val
&= ~cpu_reg
->mode_value_halt
;
2290 REG_WR_IND(bp
, cpu_reg
->state
, cpu_reg
->state_value_clear
);
2291 REG_WR_IND(bp
, cpu_reg
->mode
, val
);
2295 bnx2_init_cpus(struct bnx2
*bp
)
2297 struct cpu_reg cpu_reg
;
2303 if ((rc
= bnx2_gunzip_init(bp
)) != 0)
2306 /* Initialize the RV2P processor. */
2307 rc
= bnx2_gunzip(bp
, bnx2_rv2p_proc1
, sizeof(bnx2_rv2p_proc1
), &text
,
2312 load_rv2p_fw(bp
, text
, text_len
, RV2P_PROC1
);
2314 rc
= bnx2_gunzip(bp
, bnx2_rv2p_proc2
, sizeof(bnx2_rv2p_proc2
), &text
,
2319 load_rv2p_fw(bp
, text
, text_len
, RV2P_PROC2
);
2321 /* Initialize the RX Processor. */
2322 cpu_reg
.mode
= BNX2_RXP_CPU_MODE
;
2323 cpu_reg
.mode_value_halt
= BNX2_RXP_CPU_MODE_SOFT_HALT
;
2324 cpu_reg
.mode_value_sstep
= BNX2_RXP_CPU_MODE_STEP_ENA
;
2325 cpu_reg
.state
= BNX2_RXP_CPU_STATE
;
2326 cpu_reg
.state_value_clear
= 0xffffff;
2327 cpu_reg
.gpr0
= BNX2_RXP_CPU_REG_FILE
;
2328 cpu_reg
.evmask
= BNX2_RXP_CPU_EVENT_MASK
;
2329 cpu_reg
.pc
= BNX2_RXP_CPU_PROGRAM_COUNTER
;
2330 cpu_reg
.inst
= BNX2_RXP_CPU_INSTRUCTION
;
2331 cpu_reg
.bp
= BNX2_RXP_CPU_HW_BREAKPOINT
;
2332 cpu_reg
.spad_base
= BNX2_RXP_SCRATCH
;
2333 cpu_reg
.mips_view_base
= 0x8000000;
2335 fw
.ver_major
= bnx2_RXP_b06FwReleaseMajor
;
2336 fw
.ver_minor
= bnx2_RXP_b06FwReleaseMinor
;
2337 fw
.ver_fix
= bnx2_RXP_b06FwReleaseFix
;
2338 fw
.start_addr
= bnx2_RXP_b06FwStartAddr
;
2340 fw
.text_addr
= bnx2_RXP_b06FwTextAddr
;
2341 fw
.text_len
= bnx2_RXP_b06FwTextLen
;
2344 rc
= bnx2_gunzip(bp
, bnx2_RXP_b06FwText
, sizeof(bnx2_RXP_b06FwText
),
2351 fw
.data_addr
= bnx2_RXP_b06FwDataAddr
;
2352 fw
.data_len
= bnx2_RXP_b06FwDataLen
;
2354 fw
.data
= bnx2_RXP_b06FwData
;
2356 fw
.sbss_addr
= bnx2_RXP_b06FwSbssAddr
;
2357 fw
.sbss_len
= bnx2_RXP_b06FwSbssLen
;
2359 fw
.sbss
= bnx2_RXP_b06FwSbss
;
2361 fw
.bss_addr
= bnx2_RXP_b06FwBssAddr
;
2362 fw
.bss_len
= bnx2_RXP_b06FwBssLen
;
2364 fw
.bss
= bnx2_RXP_b06FwBss
;
2366 fw
.rodata_addr
= bnx2_RXP_b06FwRodataAddr
;
2367 fw
.rodata_len
= bnx2_RXP_b06FwRodataLen
;
2368 fw
.rodata_index
= 0;
2369 fw
.rodata
= bnx2_RXP_b06FwRodata
;
2371 load_cpu_fw(bp
, &cpu_reg
, &fw
);
2373 /* Initialize the TX Processor. */
2374 cpu_reg
.mode
= BNX2_TXP_CPU_MODE
;
2375 cpu_reg
.mode_value_halt
= BNX2_TXP_CPU_MODE_SOFT_HALT
;
2376 cpu_reg
.mode_value_sstep
= BNX2_TXP_CPU_MODE_STEP_ENA
;
2377 cpu_reg
.state
= BNX2_TXP_CPU_STATE
;
2378 cpu_reg
.state_value_clear
= 0xffffff;
2379 cpu_reg
.gpr0
= BNX2_TXP_CPU_REG_FILE
;
2380 cpu_reg
.evmask
= BNX2_TXP_CPU_EVENT_MASK
;
2381 cpu_reg
.pc
= BNX2_TXP_CPU_PROGRAM_COUNTER
;
2382 cpu_reg
.inst
= BNX2_TXP_CPU_INSTRUCTION
;
2383 cpu_reg
.bp
= BNX2_TXP_CPU_HW_BREAKPOINT
;
2384 cpu_reg
.spad_base
= BNX2_TXP_SCRATCH
;
2385 cpu_reg
.mips_view_base
= 0x8000000;
2387 fw
.ver_major
= bnx2_TXP_b06FwReleaseMajor
;
2388 fw
.ver_minor
= bnx2_TXP_b06FwReleaseMinor
;
2389 fw
.ver_fix
= bnx2_TXP_b06FwReleaseFix
;
2390 fw
.start_addr
= bnx2_TXP_b06FwStartAddr
;
2392 fw
.text_addr
= bnx2_TXP_b06FwTextAddr
;
2393 fw
.text_len
= bnx2_TXP_b06FwTextLen
;
2396 rc
= bnx2_gunzip(bp
, bnx2_TXP_b06FwText
, sizeof(bnx2_TXP_b06FwText
),
2403 fw
.data_addr
= bnx2_TXP_b06FwDataAddr
;
2404 fw
.data_len
= bnx2_TXP_b06FwDataLen
;
2406 fw
.data
= bnx2_TXP_b06FwData
;
2408 fw
.sbss_addr
= bnx2_TXP_b06FwSbssAddr
;
2409 fw
.sbss_len
= bnx2_TXP_b06FwSbssLen
;
2411 fw
.sbss
= bnx2_TXP_b06FwSbss
;
2413 fw
.bss_addr
= bnx2_TXP_b06FwBssAddr
;
2414 fw
.bss_len
= bnx2_TXP_b06FwBssLen
;
2416 fw
.bss
= bnx2_TXP_b06FwBss
;
2418 fw
.rodata_addr
= bnx2_TXP_b06FwRodataAddr
;
2419 fw
.rodata_len
= bnx2_TXP_b06FwRodataLen
;
2420 fw
.rodata_index
= 0;
2421 fw
.rodata
= bnx2_TXP_b06FwRodata
;
2423 load_cpu_fw(bp
, &cpu_reg
, &fw
);
2425 /* Initialize the TX Patch-up Processor. */
2426 cpu_reg
.mode
= BNX2_TPAT_CPU_MODE
;
2427 cpu_reg
.mode_value_halt
= BNX2_TPAT_CPU_MODE_SOFT_HALT
;
2428 cpu_reg
.mode_value_sstep
= BNX2_TPAT_CPU_MODE_STEP_ENA
;
2429 cpu_reg
.state
= BNX2_TPAT_CPU_STATE
;
2430 cpu_reg
.state_value_clear
= 0xffffff;
2431 cpu_reg
.gpr0
= BNX2_TPAT_CPU_REG_FILE
;
2432 cpu_reg
.evmask
= BNX2_TPAT_CPU_EVENT_MASK
;
2433 cpu_reg
.pc
= BNX2_TPAT_CPU_PROGRAM_COUNTER
;
2434 cpu_reg
.inst
= BNX2_TPAT_CPU_INSTRUCTION
;
2435 cpu_reg
.bp
= BNX2_TPAT_CPU_HW_BREAKPOINT
;
2436 cpu_reg
.spad_base
= BNX2_TPAT_SCRATCH
;
2437 cpu_reg
.mips_view_base
= 0x8000000;
2439 fw
.ver_major
= bnx2_TPAT_b06FwReleaseMajor
;
2440 fw
.ver_minor
= bnx2_TPAT_b06FwReleaseMinor
;
2441 fw
.ver_fix
= bnx2_TPAT_b06FwReleaseFix
;
2442 fw
.start_addr
= bnx2_TPAT_b06FwStartAddr
;
2444 fw
.text_addr
= bnx2_TPAT_b06FwTextAddr
;
2445 fw
.text_len
= bnx2_TPAT_b06FwTextLen
;
2448 rc
= bnx2_gunzip(bp
, bnx2_TPAT_b06FwText
, sizeof(bnx2_TPAT_b06FwText
),
2455 fw
.data_addr
= bnx2_TPAT_b06FwDataAddr
;
2456 fw
.data_len
= bnx2_TPAT_b06FwDataLen
;
2458 fw
.data
= bnx2_TPAT_b06FwData
;
2460 fw
.sbss_addr
= bnx2_TPAT_b06FwSbssAddr
;
2461 fw
.sbss_len
= bnx2_TPAT_b06FwSbssLen
;
2463 fw
.sbss
= bnx2_TPAT_b06FwSbss
;
2465 fw
.bss_addr
= bnx2_TPAT_b06FwBssAddr
;
2466 fw
.bss_len
= bnx2_TPAT_b06FwBssLen
;
2468 fw
.bss
= bnx2_TPAT_b06FwBss
;
2470 fw
.rodata_addr
= bnx2_TPAT_b06FwRodataAddr
;
2471 fw
.rodata_len
= bnx2_TPAT_b06FwRodataLen
;
2472 fw
.rodata_index
= 0;
2473 fw
.rodata
= bnx2_TPAT_b06FwRodata
;
2475 load_cpu_fw(bp
, &cpu_reg
, &fw
);
2477 /* Initialize the Completion Processor. */
2478 cpu_reg
.mode
= BNX2_COM_CPU_MODE
;
2479 cpu_reg
.mode_value_halt
= BNX2_COM_CPU_MODE_SOFT_HALT
;
2480 cpu_reg
.mode_value_sstep
= BNX2_COM_CPU_MODE_STEP_ENA
;
2481 cpu_reg
.state
= BNX2_COM_CPU_STATE
;
2482 cpu_reg
.state_value_clear
= 0xffffff;
2483 cpu_reg
.gpr0
= BNX2_COM_CPU_REG_FILE
;
2484 cpu_reg
.evmask
= BNX2_COM_CPU_EVENT_MASK
;
2485 cpu_reg
.pc
= BNX2_COM_CPU_PROGRAM_COUNTER
;
2486 cpu_reg
.inst
= BNX2_COM_CPU_INSTRUCTION
;
2487 cpu_reg
.bp
= BNX2_COM_CPU_HW_BREAKPOINT
;
2488 cpu_reg
.spad_base
= BNX2_COM_SCRATCH
;
2489 cpu_reg
.mips_view_base
= 0x8000000;
2491 fw
.ver_major
= bnx2_COM_b06FwReleaseMajor
;
2492 fw
.ver_minor
= bnx2_COM_b06FwReleaseMinor
;
2493 fw
.ver_fix
= bnx2_COM_b06FwReleaseFix
;
2494 fw
.start_addr
= bnx2_COM_b06FwStartAddr
;
2496 fw
.text_addr
= bnx2_COM_b06FwTextAddr
;
2497 fw
.text_len
= bnx2_COM_b06FwTextLen
;
2500 rc
= bnx2_gunzip(bp
, bnx2_COM_b06FwText
, sizeof(bnx2_COM_b06FwText
),
2507 fw
.data_addr
= bnx2_COM_b06FwDataAddr
;
2508 fw
.data_len
= bnx2_COM_b06FwDataLen
;
2510 fw
.data
= bnx2_COM_b06FwData
;
2512 fw
.sbss_addr
= bnx2_COM_b06FwSbssAddr
;
2513 fw
.sbss_len
= bnx2_COM_b06FwSbssLen
;
2515 fw
.sbss
= bnx2_COM_b06FwSbss
;
2517 fw
.bss_addr
= bnx2_COM_b06FwBssAddr
;
2518 fw
.bss_len
= bnx2_COM_b06FwBssLen
;
2520 fw
.bss
= bnx2_COM_b06FwBss
;
2522 fw
.rodata_addr
= bnx2_COM_b06FwRodataAddr
;
2523 fw
.rodata_len
= bnx2_COM_b06FwRodataLen
;
2524 fw
.rodata_index
= 0;
2525 fw
.rodata
= bnx2_COM_b06FwRodata
;
2527 load_cpu_fw(bp
, &cpu_reg
, &fw
);
2530 bnx2_gunzip_end(bp
);
2535 bnx2_set_power_state(struct bnx2
*bp
, pci_power_t state
)
2539 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
2545 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
2546 (pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
2547 PCI_PM_CTRL_PME_STATUS
);
2549 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
2550 /* delay required during transition out of D3hot */
2553 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
2554 val
|= BNX2_EMAC_MODE_MPKT_RCVD
| BNX2_EMAC_MODE_ACPI_RCVD
;
2555 val
&= ~BNX2_EMAC_MODE_MPKT
;
2556 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
2558 val
= REG_RD(bp
, BNX2_RPM_CONFIG
);
2559 val
&= ~BNX2_RPM_CONFIG_ACPI_ENA
;
2560 REG_WR(bp
, BNX2_RPM_CONFIG
, val
);
2571 autoneg
= bp
->autoneg
;
2572 advertising
= bp
->advertising
;
2574 bp
->autoneg
= AUTONEG_SPEED
;
2575 bp
->advertising
= ADVERTISED_10baseT_Half
|
2576 ADVERTISED_10baseT_Full
|
2577 ADVERTISED_100baseT_Half
|
2578 ADVERTISED_100baseT_Full
|
2581 bnx2_setup_copper_phy(bp
);
2583 bp
->autoneg
= autoneg
;
2584 bp
->advertising
= advertising
;
2586 bnx2_set_mac_addr(bp
);
2588 val
= REG_RD(bp
, BNX2_EMAC_MODE
);
2590 /* Enable port mode. */
2591 val
&= ~BNX2_EMAC_MODE_PORT
;
2592 val
|= BNX2_EMAC_MODE_PORT_MII
|
2593 BNX2_EMAC_MODE_MPKT_RCVD
|
2594 BNX2_EMAC_MODE_ACPI_RCVD
|
2595 BNX2_EMAC_MODE_MPKT
;
2597 REG_WR(bp
, BNX2_EMAC_MODE
, val
);
2599 /* receive all multicast */
2600 for (i
= 0; i
< NUM_MC_HASH_REGISTERS
; i
++) {
2601 REG_WR(bp
, BNX2_EMAC_MULTICAST_HASH0
+ (i
* 4),
2604 REG_WR(bp
, BNX2_EMAC_RX_MODE
,
2605 BNX2_EMAC_RX_MODE_SORT_MODE
);
2607 val
= 1 | BNX2_RPM_SORT_USER0_BC_EN
|
2608 BNX2_RPM_SORT_USER0_MC_EN
;
2609 REG_WR(bp
, BNX2_RPM_SORT_USER0
, 0x0);
2610 REG_WR(bp
, BNX2_RPM_SORT_USER0
, val
);
2611 REG_WR(bp
, BNX2_RPM_SORT_USER0
, val
|
2612 BNX2_RPM_SORT_USER0_ENA
);
2614 /* Need to enable EMAC and RPM for WOL. */
2615 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
2616 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE
|
2617 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE
|
2618 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE
);
2620 val
= REG_RD(bp
, BNX2_RPM_CONFIG
);
2621 val
&= ~BNX2_RPM_CONFIG_ACPI_ENA
;
2622 REG_WR(bp
, BNX2_RPM_CONFIG
, val
);
2624 wol_msg
= BNX2_DRV_MSG_CODE_SUSPEND_WOL
;
2627 wol_msg
= BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL
;
2630 if (!(bp
->flags
& NO_WOL_FLAG
))
2631 bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT3
| wol_msg
, 0);
2633 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
2634 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
2635 (CHIP_ID(bp
) == CHIP_ID_5706_A1
)) {
2644 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
2646 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
2649 /* No more memory access after this point until
2650 * device is brought back to D0.
2662 bnx2_acquire_nvram_lock(struct bnx2
*bp
)
2667 /* Request access to the flash interface. */
2668 REG_WR(bp
, BNX2_NVM_SW_ARB
, BNX2_NVM_SW_ARB_ARB_REQ_SET2
);
2669 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
2670 val
= REG_RD(bp
, BNX2_NVM_SW_ARB
);
2671 if (val
& BNX2_NVM_SW_ARB_ARB_ARB2
)
2677 if (j
>= NVRAM_TIMEOUT_COUNT
)
2684 bnx2_release_nvram_lock(struct bnx2
*bp
)
2689 /* Relinquish nvram interface. */
2690 REG_WR(bp
, BNX2_NVM_SW_ARB
, BNX2_NVM_SW_ARB_ARB_REQ_CLR2
);
2692 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
2693 val
= REG_RD(bp
, BNX2_NVM_SW_ARB
);
2694 if (!(val
& BNX2_NVM_SW_ARB_ARB_ARB2
))
2700 if (j
>= NVRAM_TIMEOUT_COUNT
)
2708 bnx2_enable_nvram_write(struct bnx2
*bp
)
2712 val
= REG_RD(bp
, BNX2_MISC_CFG
);
2713 REG_WR(bp
, BNX2_MISC_CFG
, val
| BNX2_MISC_CFG_NVM_WR_EN_PCI
);
2715 if (!bp
->flash_info
->buffered
) {
2718 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
2719 REG_WR(bp
, BNX2_NVM_COMMAND
,
2720 BNX2_NVM_COMMAND_WREN
| BNX2_NVM_COMMAND_DOIT
);
2722 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
2725 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
2726 if (val
& BNX2_NVM_COMMAND_DONE
)
2730 if (j
>= NVRAM_TIMEOUT_COUNT
)
2737 bnx2_disable_nvram_write(struct bnx2
*bp
)
2741 val
= REG_RD(bp
, BNX2_MISC_CFG
);
2742 REG_WR(bp
, BNX2_MISC_CFG
, val
& ~BNX2_MISC_CFG_NVM_WR_EN
);
2747 bnx2_enable_nvram_access(struct bnx2
*bp
)
2751 val
= REG_RD(bp
, BNX2_NVM_ACCESS_ENABLE
);
2752 /* Enable both bits, even on read. */
2753 REG_WR(bp
, BNX2_NVM_ACCESS_ENABLE
,
2754 val
| BNX2_NVM_ACCESS_ENABLE_EN
| BNX2_NVM_ACCESS_ENABLE_WR_EN
);
2758 bnx2_disable_nvram_access(struct bnx2
*bp
)
2762 val
= REG_RD(bp
, BNX2_NVM_ACCESS_ENABLE
);
2763 /* Disable both bits, even after read. */
2764 REG_WR(bp
, BNX2_NVM_ACCESS_ENABLE
,
2765 val
& ~(BNX2_NVM_ACCESS_ENABLE_EN
|
2766 BNX2_NVM_ACCESS_ENABLE_WR_EN
));
2770 bnx2_nvram_erase_page(struct bnx2
*bp
, u32 offset
)
2775 if (bp
->flash_info
->buffered
)
2776 /* Buffered flash, no erase needed */
2779 /* Build an erase command */
2780 cmd
= BNX2_NVM_COMMAND_ERASE
| BNX2_NVM_COMMAND_WR
|
2781 BNX2_NVM_COMMAND_DOIT
;
2783 /* Need to clear DONE bit separately. */
2784 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
2786 /* Address of the NVRAM to read from. */
2787 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
2789 /* Issue an erase command. */
2790 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
2792 /* Wait for completion. */
2793 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
2798 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
2799 if (val
& BNX2_NVM_COMMAND_DONE
)
2803 if (j
>= NVRAM_TIMEOUT_COUNT
)
2810 bnx2_nvram_read_dword(struct bnx2
*bp
, u32 offset
, u8
*ret_val
, u32 cmd_flags
)
2815 /* Build the command word. */
2816 cmd
= BNX2_NVM_COMMAND_DOIT
| cmd_flags
;
2818 /* Calculate an offset of a buffered flash. */
2819 if (bp
->flash_info
->buffered
) {
2820 offset
= ((offset
/ bp
->flash_info
->page_size
) <<
2821 bp
->flash_info
->page_bits
) +
2822 (offset
% bp
->flash_info
->page_size
);
2825 /* Need to clear DONE bit separately. */
2826 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
2828 /* Address of the NVRAM to read from. */
2829 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
2831 /* Issue a read command. */
2832 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
2834 /* Wait for completion. */
2835 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
2840 val
= REG_RD(bp
, BNX2_NVM_COMMAND
);
2841 if (val
& BNX2_NVM_COMMAND_DONE
) {
2842 val
= REG_RD(bp
, BNX2_NVM_READ
);
2844 val
= be32_to_cpu(val
);
2845 memcpy(ret_val
, &val
, 4);
2849 if (j
>= NVRAM_TIMEOUT_COUNT
)
2857 bnx2_nvram_write_dword(struct bnx2
*bp
, u32 offset
, u8
*val
, u32 cmd_flags
)
2862 /* Build the command word. */
2863 cmd
= BNX2_NVM_COMMAND_DOIT
| BNX2_NVM_COMMAND_WR
| cmd_flags
;
2865 /* Calculate an offset of a buffered flash. */
2866 if (bp
->flash_info
->buffered
) {
2867 offset
= ((offset
/ bp
->flash_info
->page_size
) <<
2868 bp
->flash_info
->page_bits
) +
2869 (offset
% bp
->flash_info
->page_size
);
2872 /* Need to clear DONE bit separately. */
2873 REG_WR(bp
, BNX2_NVM_COMMAND
, BNX2_NVM_COMMAND_DONE
);
2875 memcpy(&val32
, val
, 4);
2876 val32
= cpu_to_be32(val32
);
2878 /* Write the data. */
2879 REG_WR(bp
, BNX2_NVM_WRITE
, val32
);
2881 /* Address of the NVRAM to write to. */
2882 REG_WR(bp
, BNX2_NVM_ADDR
, offset
& BNX2_NVM_ADDR_NVM_ADDR_VALUE
);
2884 /* Issue the write command. */
2885 REG_WR(bp
, BNX2_NVM_COMMAND
, cmd
);
2887 /* Wait for completion. */
2888 for (j
= 0; j
< NVRAM_TIMEOUT_COUNT
; j
++) {
2891 if (REG_RD(bp
, BNX2_NVM_COMMAND
) & BNX2_NVM_COMMAND_DONE
)
2894 if (j
>= NVRAM_TIMEOUT_COUNT
)
2901 bnx2_init_nvram(struct bnx2
*bp
)
2904 int j
, entry_count
, rc
;
2905 struct flash_spec
*flash
;
2907 /* Determine the selected interface. */
2908 val
= REG_RD(bp
, BNX2_NVM_CFG1
);
2910 entry_count
= sizeof(flash_table
) / sizeof(struct flash_spec
);
2913 if (val
& 0x40000000) {
2915 /* Flash interface has been reconfigured */
2916 for (j
= 0, flash
= &flash_table
[0]; j
< entry_count
;
2918 if ((val
& FLASH_BACKUP_STRAP_MASK
) ==
2919 (flash
->config1
& FLASH_BACKUP_STRAP_MASK
)) {
2920 bp
->flash_info
= flash
;
2927 /* Not yet been reconfigured */
2929 if (val
& (1 << 23))
2930 mask
= FLASH_BACKUP_STRAP_MASK
;
2932 mask
= FLASH_STRAP_MASK
;
2934 for (j
= 0, flash
= &flash_table
[0]; j
< entry_count
;
2937 if ((val
& mask
) == (flash
->strapping
& mask
)) {
2938 bp
->flash_info
= flash
;
2940 /* Request access to the flash interface. */
2941 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
2944 /* Enable access to flash interface */
2945 bnx2_enable_nvram_access(bp
);
2947 /* Reconfigure the flash interface */
2948 REG_WR(bp
, BNX2_NVM_CFG1
, flash
->config1
);
2949 REG_WR(bp
, BNX2_NVM_CFG2
, flash
->config2
);
2950 REG_WR(bp
, BNX2_NVM_CFG3
, flash
->config3
);
2951 REG_WR(bp
, BNX2_NVM_WRITE1
, flash
->write1
);
2953 /* Disable access to flash interface */
2954 bnx2_disable_nvram_access(bp
);
2955 bnx2_release_nvram_lock(bp
);
2960 } /* if (val & 0x40000000) */
2962 if (j
== entry_count
) {
2963 bp
->flash_info
= NULL
;
2964 printk(KERN_ALERT PFX
"Unknown flash/EEPROM type.\n");
2968 val
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_SHARED_HW_CFG_CONFIG2
);
2969 val
&= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK
;
2971 bp
->flash_size
= val
;
2973 bp
->flash_size
= bp
->flash_info
->total_size
;
2979 bnx2_nvram_read(struct bnx2
*bp
, u32 offset
, u8
*ret_buf
,
2983 u32 cmd_flags
, offset32
, len32
, extra
;
2988 /* Request access to the flash interface. */
2989 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
2992 /* Enable access to flash interface */
2993 bnx2_enable_nvram_access(bp
);
3006 pre_len
= 4 - (offset
& 3);
3008 if (pre_len
>= len32
) {
3010 cmd_flags
= BNX2_NVM_COMMAND_FIRST
|
3011 BNX2_NVM_COMMAND_LAST
;
3014 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
3017 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
3022 memcpy(ret_buf
, buf
+ (offset
& 3), pre_len
);
3029 extra
= 4 - (len32
& 3);
3030 len32
= (len32
+ 4) & ~3;
3037 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
3039 cmd_flags
= BNX2_NVM_COMMAND_FIRST
|
3040 BNX2_NVM_COMMAND_LAST
;
3042 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
3044 memcpy(ret_buf
, buf
, 4 - extra
);
3046 else if (len32
> 0) {
3049 /* Read the first word. */
3053 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
3055 rc
= bnx2_nvram_read_dword(bp
, offset32
, ret_buf
, cmd_flags
);
3057 /* Advance to the next dword. */
3062 while (len32
> 4 && rc
== 0) {
3063 rc
= bnx2_nvram_read_dword(bp
, offset32
, ret_buf
, 0);
3065 /* Advance to the next dword. */
3074 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
3075 rc
= bnx2_nvram_read_dword(bp
, offset32
, buf
, cmd_flags
);
3077 memcpy(ret_buf
, buf
, 4 - extra
);
3080 /* Disable access to flash interface */
3081 bnx2_disable_nvram_access(bp
);
3083 bnx2_release_nvram_lock(bp
);
3089 bnx2_nvram_write(struct bnx2
*bp
, u32 offset
, u8
*data_buf
,
3092 u32 written
, offset32
, len32
;
3093 u8
*buf
, start
[4], end
[4], *flash_buffer
= NULL
;
3095 int align_start
, align_end
;
3100 align_start
= align_end
= 0;
3102 if ((align_start
= (offset32
& 3))) {
3104 len32
+= align_start
;
3105 if ((rc
= bnx2_nvram_read(bp
, offset32
, start
, 4)))
3110 if ((len32
> 4) || !align_start
) {
3111 align_end
= 4 - (len32
& 3);
3113 if ((rc
= bnx2_nvram_read(bp
, offset32
+ len32
- 4,
3120 if (align_start
|| align_end
) {
3121 buf
= kmalloc(len32
, GFP_KERNEL
);
3125 memcpy(buf
, start
, 4);
3128 memcpy(buf
+ len32
- 4, end
, 4);
3130 memcpy(buf
+ align_start
, data_buf
, buf_size
);
3133 if (bp
->flash_info
->buffered
== 0) {
3134 flash_buffer
= kmalloc(264, GFP_KERNEL
);
3135 if (flash_buffer
== NULL
) {
3137 goto nvram_write_end
;
3142 while ((written
< len32
) && (rc
== 0)) {
3143 u32 page_start
, page_end
, data_start
, data_end
;
3144 u32 addr
, cmd_flags
;
3147 /* Find the page_start addr */
3148 page_start
= offset32
+ written
;
3149 page_start
-= (page_start
% bp
->flash_info
->page_size
);
3150 /* Find the page_end addr */
3151 page_end
= page_start
+ bp
->flash_info
->page_size
;
3152 /* Find the data_start addr */
3153 data_start
= (written
== 0) ? offset32
: page_start
;
3154 /* Find the data_end addr */
3155 data_end
= (page_end
> offset32
+ len32
) ?
3156 (offset32
+ len32
) : page_end
;
3158 /* Request access to the flash interface. */
3159 if ((rc
= bnx2_acquire_nvram_lock(bp
)) != 0)
3160 goto nvram_write_end
;
3162 /* Enable access to flash interface */
3163 bnx2_enable_nvram_access(bp
);
3165 cmd_flags
= BNX2_NVM_COMMAND_FIRST
;
3166 if (bp
->flash_info
->buffered
== 0) {
3169 /* Read the whole page into the buffer
3170 * (non-buffer flash only) */
3171 for (j
= 0; j
< bp
->flash_info
->page_size
; j
+= 4) {
3172 if (j
== (bp
->flash_info
->page_size
- 4)) {
3173 cmd_flags
|= BNX2_NVM_COMMAND_LAST
;
3175 rc
= bnx2_nvram_read_dword(bp
,
3181 goto nvram_write_end
;
3187 /* Enable writes to flash interface (unlock write-protect) */
3188 if ((rc
= bnx2_enable_nvram_write(bp
)) != 0)
3189 goto nvram_write_end
;
3191 /* Erase the page */
3192 if ((rc
= bnx2_nvram_erase_page(bp
, page_start
)) != 0)
3193 goto nvram_write_end
;
3195 /* Re-enable the write again for the actual write */
3196 bnx2_enable_nvram_write(bp
);
3198 /* Loop to write back the buffer data from page_start to
3201 if (bp
->flash_info
->buffered
== 0) {
3202 for (addr
= page_start
; addr
< data_start
;
3203 addr
+= 4, i
+= 4) {
3205 rc
= bnx2_nvram_write_dword(bp
, addr
,
3206 &flash_buffer
[i
], cmd_flags
);
3209 goto nvram_write_end
;
3215 /* Loop to write the new data from data_start to data_end */
3216 for (addr
= data_start
; addr
< data_end
; addr
+= 4, i
+= 4) {
3217 if ((addr
== page_end
- 4) ||
3218 ((bp
->flash_info
->buffered
) &&
3219 (addr
== data_end
- 4))) {
3221 cmd_flags
|= BNX2_NVM_COMMAND_LAST
;
3223 rc
= bnx2_nvram_write_dword(bp
, addr
, buf
,
3227 goto nvram_write_end
;
3233 /* Loop to write back the buffer data from data_end
3235 if (bp
->flash_info
->buffered
== 0) {
3236 for (addr
= data_end
; addr
< page_end
;
3237 addr
+= 4, i
+= 4) {
3239 if (addr
== page_end
-4) {
3240 cmd_flags
= BNX2_NVM_COMMAND_LAST
;
3242 rc
= bnx2_nvram_write_dword(bp
, addr
,
3243 &flash_buffer
[i
], cmd_flags
);
3246 goto nvram_write_end
;
3252 /* Disable writes to flash interface (lock write-protect) */
3253 bnx2_disable_nvram_write(bp
);
3255 /* Disable access to flash interface */
3256 bnx2_disable_nvram_access(bp
);
3257 bnx2_release_nvram_lock(bp
);
3259 /* Increment written */
3260 written
+= data_end
- data_start
;
3264 if (bp
->flash_info
->buffered
== 0)
3265 kfree(flash_buffer
);
3267 if (align_start
|| align_end
)
3273 bnx2_reset_chip(struct bnx2
*bp
, u32 reset_code
)
3278 /* Wait for the current PCI transaction to complete before
3279 * issuing a reset. */
3280 REG_WR(bp
, BNX2_MISC_ENABLE_CLR_BITS
,
3281 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE
|
3282 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE
|
3283 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE
|
3284 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE
);
3285 val
= REG_RD(bp
, BNX2_MISC_ENABLE_CLR_BITS
);
3288 /* Wait for the firmware to tell us it is ok to issue a reset. */
3289 bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT0
| reset_code
, 1);
3291 /* Deposit a driver reset signature so the firmware knows that
3292 * this is a soft reset. */
3293 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_DRV_RESET_SIGNATURE
,
3294 BNX2_DRV_RESET_SIGNATURE_MAGIC
);
3296 /* Do a dummy read to force the chip to complete all current transaction
3297 * before we issue a reset. */
3298 val
= REG_RD(bp
, BNX2_MISC_ID
);
3300 val
= BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
3301 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA
|
3302 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP
;
3305 REG_WR(bp
, BNX2_PCICFG_MISC_CONFIG
, val
);
3307 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
3308 (CHIP_ID(bp
) == CHIP_ID_5706_A1
))
3311 /* Reset takes approximate 30 usec */
3312 for (i
= 0; i
< 10; i
++) {
3313 val
= REG_RD(bp
, BNX2_PCICFG_MISC_CONFIG
);
3314 if ((val
& (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
3315 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY
)) == 0) {
3321 if (val
& (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ
|
3322 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY
)) {
3323 printk(KERN_ERR PFX
"Chip reset did not complete\n");
3327 /* Make sure byte swapping is properly configured. */
3328 val
= REG_RD(bp
, BNX2_PCI_SWAP_DIAG0
);
3329 if (val
!= 0x01020304) {
3330 printk(KERN_ERR PFX
"Chip not in correct endian mode\n");
3334 /* Wait for the firmware to finish its initialization. */
3335 rc
= bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT1
| reset_code
, 0);
3339 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
3340 /* Adjust the voltage regular to two steps lower. The default
3341 * of this register is 0x0000000e. */
3342 REG_WR(bp
, BNX2_MISC_VREG_CONTROL
, 0x000000fa);
3344 /* Remove bad rbuf memory from the free pool. */
3345 rc
= bnx2_alloc_bad_rbuf(bp
);
3352 bnx2_init_chip(struct bnx2
*bp
)
3357 /* Make sure the interrupt is not active. */
3358 REG_WR(bp
, BNX2_PCICFG_INT_ACK_CMD
, BNX2_PCICFG_INT_ACK_CMD_MASK_INT
);
3360 val
= BNX2_DMA_CONFIG_DATA_BYTE_SWAP
|
3361 BNX2_DMA_CONFIG_DATA_WORD_SWAP
|
3363 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP
|
3365 BNX2_DMA_CONFIG_CNTL_WORD_SWAP
|
3366 DMA_READ_CHANS
<< 12 |
3367 DMA_WRITE_CHANS
<< 16;
3369 val
|= (0x2 << 20) | (1 << 11);
3371 if ((bp
->flags
& PCIX_FLAG
) && (bp
->bus_speed_mhz
== 133))
3374 if ((CHIP_NUM(bp
) == CHIP_NUM_5706
) &&
3375 (CHIP_ID(bp
) != CHIP_ID_5706_A0
) && !(bp
->flags
& PCIX_FLAG
))
3376 val
|= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA
;
3378 REG_WR(bp
, BNX2_DMA_CONFIG
, val
);
3380 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
3381 val
= REG_RD(bp
, BNX2_TDMA_CONFIG
);
3382 val
|= BNX2_TDMA_CONFIG_ONE_DMA
;
3383 REG_WR(bp
, BNX2_TDMA_CONFIG
, val
);
3386 if (bp
->flags
& PCIX_FLAG
) {
3389 pci_read_config_word(bp
->pdev
, bp
->pcix_cap
+ PCI_X_CMD
,
3391 pci_write_config_word(bp
->pdev
, bp
->pcix_cap
+ PCI_X_CMD
,
3392 val16
& ~PCI_X_CMD_ERO
);
3395 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
,
3396 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE
|
3397 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE
|
3398 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE
);
3400 /* Initialize context mapping and zero out the quick contexts. The
3401 * context block must have already been enabled. */
3402 bnx2_init_context(bp
);
3404 if ((rc
= bnx2_init_cpus(bp
)) != 0)
3407 bnx2_init_nvram(bp
);
3409 bnx2_set_mac_addr(bp
);
3411 val
= REG_RD(bp
, BNX2_MQ_CONFIG
);
3412 val
&= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE
;
3413 val
|= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256
;
3414 REG_WR(bp
, BNX2_MQ_CONFIG
, val
);
3416 val
= 0x10000 + (MAX_CID_CNT
* MB_KERNEL_CTX_SIZE
);
3417 REG_WR(bp
, BNX2_MQ_KNL_BYP_WIND_START
, val
);
3418 REG_WR(bp
, BNX2_MQ_KNL_WIND_END
, val
);
3420 val
= (BCM_PAGE_BITS
- 8) << 24;
3421 REG_WR(bp
, BNX2_RV2P_CONFIG
, val
);
3423 /* Configure page size. */
3424 val
= REG_RD(bp
, BNX2_TBDR_CONFIG
);
3425 val
&= ~BNX2_TBDR_CONFIG_PAGE_SIZE
;
3426 val
|= (BCM_PAGE_BITS
- 8) << 24 | 0x40;
3427 REG_WR(bp
, BNX2_TBDR_CONFIG
, val
);
3429 val
= bp
->mac_addr
[0] +
3430 (bp
->mac_addr
[1] << 8) +
3431 (bp
->mac_addr
[2] << 16) +
3433 (bp
->mac_addr
[4] << 8) +
3434 (bp
->mac_addr
[5] << 16);
3435 REG_WR(bp
, BNX2_EMAC_BACKOFF_SEED
, val
);
3437 /* Program the MTU. Also include 4 bytes for CRC32. */
3438 val
= bp
->dev
->mtu
+ ETH_HLEN
+ 4;
3439 if (val
> (MAX_ETHERNET_PACKET_SIZE
+ 4))
3440 val
|= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA
;
3441 REG_WR(bp
, BNX2_EMAC_RX_MTU_SIZE
, val
);
3443 bp
->last_status_idx
= 0;
3444 bp
->rx_mode
= BNX2_EMAC_RX_MODE_SORT_MODE
;
3446 /* Set up how to generate a link change interrupt. */
3447 REG_WR(bp
, BNX2_EMAC_ATTENTION_ENA
, BNX2_EMAC_ATTENTION_ENA_LINK
);
3449 REG_WR(bp
, BNX2_HC_STATUS_ADDR_L
,
3450 (u64
) bp
->status_blk_mapping
& 0xffffffff);
3451 REG_WR(bp
, BNX2_HC_STATUS_ADDR_H
, (u64
) bp
->status_blk_mapping
>> 32);
3453 REG_WR(bp
, BNX2_HC_STATISTICS_ADDR_L
,
3454 (u64
) bp
->stats_blk_mapping
& 0xffffffff);
3455 REG_WR(bp
, BNX2_HC_STATISTICS_ADDR_H
,
3456 (u64
) bp
->stats_blk_mapping
>> 32);
3458 REG_WR(bp
, BNX2_HC_TX_QUICK_CONS_TRIP
,
3459 (bp
->tx_quick_cons_trip_int
<< 16) | bp
->tx_quick_cons_trip
);
3461 REG_WR(bp
, BNX2_HC_RX_QUICK_CONS_TRIP
,
3462 (bp
->rx_quick_cons_trip_int
<< 16) | bp
->rx_quick_cons_trip
);
3464 REG_WR(bp
, BNX2_HC_COMP_PROD_TRIP
,
3465 (bp
->comp_prod_trip_int
<< 16) | bp
->comp_prod_trip
);
3467 REG_WR(bp
, BNX2_HC_TX_TICKS
, (bp
->tx_ticks_int
<< 16) | bp
->tx_ticks
);
3469 REG_WR(bp
, BNX2_HC_RX_TICKS
, (bp
->rx_ticks_int
<< 16) | bp
->rx_ticks
);
3471 REG_WR(bp
, BNX2_HC_COM_TICKS
,
3472 (bp
->com_ticks_int
<< 16) | bp
->com_ticks
);
3474 REG_WR(bp
, BNX2_HC_CMD_TICKS
,
3475 (bp
->cmd_ticks_int
<< 16) | bp
->cmd_ticks
);
3477 REG_WR(bp
, BNX2_HC_STATS_TICKS
, bp
->stats_ticks
& 0xffff00);
3478 REG_WR(bp
, BNX2_HC_STAT_COLLECT_TICKS
, 0xbb8); /* 3ms */
3480 if (CHIP_ID(bp
) == CHIP_ID_5706_A1
)
3481 REG_WR(bp
, BNX2_HC_CONFIG
, BNX2_HC_CONFIG_COLLECT_STATS
);
3483 REG_WR(bp
, BNX2_HC_CONFIG
, BNX2_HC_CONFIG_RX_TMR_MODE
|
3484 BNX2_HC_CONFIG_TX_TMR_MODE
|
3485 BNX2_HC_CONFIG_COLLECT_STATS
);
3488 /* Clear internal stats counters. */
3489 REG_WR(bp
, BNX2_HC_COMMAND
, BNX2_HC_COMMAND_CLR_STAT_NOW
);
3491 REG_WR(bp
, BNX2_HC_ATTN_BITS_ENABLE
, STATUS_ATTN_BITS_LINK_STATE
);
3493 if (REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_FEATURE
) &
3494 BNX2_PORT_FEATURE_ASF_ENABLED
)
3495 bp
->flags
|= ASF_ENABLE_FLAG
;
3497 /* Initialize the receive filter. */
3498 bnx2_set_rx_mode(bp
->dev
);
3500 rc
= bnx2_fw_sync(bp
, BNX2_DRV_MSG_DATA_WAIT2
| BNX2_DRV_MSG_CODE_RESET
,
3503 REG_WR(bp
, BNX2_MISC_ENABLE_SET_BITS
, 0x5ffffff);
3504 REG_RD(bp
, BNX2_MISC_ENABLE_SET_BITS
);
3508 bp
->hc_cmd
= REG_RD(bp
, BNX2_HC_COMMAND
);
3515 bnx2_init_tx_ring(struct bnx2
*bp
)
3520 bp
->tx_wake_thresh
= bp
->tx_ring_size
/ 2;
3522 txbd
= &bp
->tx_desc_ring
[MAX_TX_DESC_CNT
];
3524 txbd
->tx_bd_haddr_hi
= (u64
) bp
->tx_desc_mapping
>> 32;
3525 txbd
->tx_bd_haddr_lo
= (u64
) bp
->tx_desc_mapping
& 0xffffffff;
3530 bp
->tx_prod_bseq
= 0;
3532 val
= BNX2_L2CTX_TYPE_TYPE_L2
;
3533 val
|= BNX2_L2CTX_TYPE_SIZE_L2
;
3534 CTX_WR(bp
, GET_CID_ADDR(TX_CID
), BNX2_L2CTX_TYPE
, val
);
3536 val
= BNX2_L2CTX_CMD_TYPE_TYPE_L2
;
3538 CTX_WR(bp
, GET_CID_ADDR(TX_CID
), BNX2_L2CTX_CMD_TYPE
, val
);
3540 val
= (u64
) bp
->tx_desc_mapping
>> 32;
3541 CTX_WR(bp
, GET_CID_ADDR(TX_CID
), BNX2_L2CTX_TBDR_BHADDR_HI
, val
);
3543 val
= (u64
) bp
->tx_desc_mapping
& 0xffffffff;
3544 CTX_WR(bp
, GET_CID_ADDR(TX_CID
), BNX2_L2CTX_TBDR_BHADDR_LO
, val
);
3548 bnx2_init_rx_ring(struct bnx2
*bp
)
3552 u16 prod
, ring_prod
;
3555 /* 8 for CRC and VLAN */
3556 bp
->rx_buf_use_size
= bp
->dev
->mtu
+ ETH_HLEN
+ bp
->rx_offset
+ 8;
3557 /* 8 for alignment */
3558 bp
->rx_buf_size
= bp
->rx_buf_use_size
+ 8;
3560 ring_prod
= prod
= bp
->rx_prod
= 0;
3563 bp
->rx_prod_bseq
= 0;
3565 for (i
= 0; i
< bp
->rx_max_ring
; i
++) {
3568 rxbd
= &bp
->rx_desc_ring
[i
][0];
3569 for (j
= 0; j
< MAX_RX_DESC_CNT
; j
++, rxbd
++) {
3570 rxbd
->rx_bd_len
= bp
->rx_buf_use_size
;
3571 rxbd
->rx_bd_flags
= RX_BD_FLAGS_START
| RX_BD_FLAGS_END
;
3573 if (i
== (bp
->rx_max_ring
- 1))
3577 rxbd
->rx_bd_haddr_hi
= (u64
) bp
->rx_desc_mapping
[j
] >> 32;
3578 rxbd
->rx_bd_haddr_lo
= (u64
) bp
->rx_desc_mapping
[j
] &
3582 val
= BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE
;
3583 val
|= BNX2_L2CTX_CTX_TYPE_SIZE_L2
;
3585 CTX_WR(bp
, GET_CID_ADDR(RX_CID
), BNX2_L2CTX_CTX_TYPE
, val
);
3587 val
= (u64
) bp
->rx_desc_mapping
[0] >> 32;
3588 CTX_WR(bp
, GET_CID_ADDR(RX_CID
), BNX2_L2CTX_NX_BDHADDR_HI
, val
);
3590 val
= (u64
) bp
->rx_desc_mapping
[0] & 0xffffffff;
3591 CTX_WR(bp
, GET_CID_ADDR(RX_CID
), BNX2_L2CTX_NX_BDHADDR_LO
, val
);
3593 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
3594 if (bnx2_alloc_rx_skb(bp
, ring_prod
) < 0) {
3597 prod
= NEXT_RX_BD(prod
);
3598 ring_prod
= RX_RING_IDX(prod
);
3602 REG_WR16(bp
, MB_RX_CID_ADDR
+ BNX2_L2CTX_HOST_BDIDX
, prod
);
3604 REG_WR(bp
, MB_RX_CID_ADDR
+ BNX2_L2CTX_HOST_BSEQ
, bp
->rx_prod_bseq
);
3608 bnx2_set_rx_ring_size(struct bnx2
*bp
, u32 size
)
3612 bp
->rx_ring_size
= size
;
3614 while (size
> MAX_RX_DESC_CNT
) {
3615 size
-= MAX_RX_DESC_CNT
;
3618 /* round to next power of 2 */
3620 while ((max
& num_rings
) == 0)
3623 if (num_rings
!= max
)
3626 bp
->rx_max_ring
= max
;
3627 bp
->rx_max_ring_idx
= (bp
->rx_max_ring
* RX_DESC_CNT
) - 1;
3631 bnx2_free_tx_skbs(struct bnx2
*bp
)
3635 if (bp
->tx_buf_ring
== NULL
)
3638 for (i
= 0; i
< TX_DESC_CNT
; ) {
3639 struct sw_bd
*tx_buf
= &bp
->tx_buf_ring
[i
];
3640 struct sk_buff
*skb
= tx_buf
->skb
;
3648 pci_unmap_single(bp
->pdev
, pci_unmap_addr(tx_buf
, mapping
),
3649 skb_headlen(skb
), PCI_DMA_TODEVICE
);
3653 last
= skb_shinfo(skb
)->nr_frags
;
3654 for (j
= 0; j
< last
; j
++) {
3655 tx_buf
= &bp
->tx_buf_ring
[i
+ j
+ 1];
3656 pci_unmap_page(bp
->pdev
,
3657 pci_unmap_addr(tx_buf
, mapping
),
3658 skb_shinfo(skb
)->frags
[j
].size
,
3668 bnx2_free_rx_skbs(struct bnx2
*bp
)
3672 if (bp
->rx_buf_ring
== NULL
)
3675 for (i
= 0; i
< bp
->rx_max_ring_idx
; i
++) {
3676 struct sw_bd
*rx_buf
= &bp
->rx_buf_ring
[i
];
3677 struct sk_buff
*skb
= rx_buf
->skb
;
3682 pci_unmap_single(bp
->pdev
, pci_unmap_addr(rx_buf
, mapping
),
3683 bp
->rx_buf_use_size
, PCI_DMA_FROMDEVICE
);
3692 bnx2_free_skbs(struct bnx2
*bp
)
3694 bnx2_free_tx_skbs(bp
);
3695 bnx2_free_rx_skbs(bp
);
3699 bnx2_reset_nic(struct bnx2
*bp
, u32 reset_code
)
3703 rc
= bnx2_reset_chip(bp
, reset_code
);
3708 if ((rc
= bnx2_init_chip(bp
)) != 0)
3711 bnx2_init_tx_ring(bp
);
3712 bnx2_init_rx_ring(bp
);
3717 bnx2_init_nic(struct bnx2
*bp
)
3721 if ((rc
= bnx2_reset_nic(bp
, BNX2_DRV_MSG_CODE_RESET
)) != 0)
3724 spin_lock_bh(&bp
->phy_lock
);
3726 spin_unlock_bh(&bp
->phy_lock
);
3732 bnx2_test_registers(struct bnx2
*bp
)
3736 static const struct {
3742 { 0x006c, 0, 0x00000000, 0x0000003f },
3743 { 0x0090, 0, 0xffffffff, 0x00000000 },
3744 { 0x0094, 0, 0x00000000, 0x00000000 },
3746 { 0x0404, 0, 0x00003f00, 0x00000000 },
3747 { 0x0418, 0, 0x00000000, 0xffffffff },
3748 { 0x041c, 0, 0x00000000, 0xffffffff },
3749 { 0x0420, 0, 0x00000000, 0x80ffffff },
3750 { 0x0424, 0, 0x00000000, 0x00000000 },
3751 { 0x0428, 0, 0x00000000, 0x00000001 },
3752 { 0x0450, 0, 0x00000000, 0x0000ffff },
3753 { 0x0454, 0, 0x00000000, 0xffffffff },
3754 { 0x0458, 0, 0x00000000, 0xffffffff },
3756 { 0x0808, 0, 0x00000000, 0xffffffff },
3757 { 0x0854, 0, 0x00000000, 0xffffffff },
3758 { 0x0868, 0, 0x00000000, 0x77777777 },
3759 { 0x086c, 0, 0x00000000, 0x77777777 },
3760 { 0x0870, 0, 0x00000000, 0x77777777 },
3761 { 0x0874, 0, 0x00000000, 0x77777777 },
3763 { 0x0c00, 0, 0x00000000, 0x00000001 },
3764 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3765 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3767 { 0x1000, 0, 0x00000000, 0x00000001 },
3768 { 0x1004, 0, 0x00000000, 0x000f0001 },
3770 { 0x1408, 0, 0x01c00800, 0x00000000 },
3771 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3772 { 0x14a8, 0, 0x00000000, 0x000001ff },
3773 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3774 { 0x14b0, 0, 0x00000002, 0x00000001 },
3775 { 0x14b8, 0, 0x00000000, 0x00000000 },
3776 { 0x14c0, 0, 0x00000000, 0x00000009 },
3777 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3778 { 0x14cc, 0, 0x00000000, 0x00000001 },
3779 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3781 { 0x1800, 0, 0x00000000, 0x00000001 },
3782 { 0x1804, 0, 0x00000000, 0x00000003 },
3784 { 0x2800, 0, 0x00000000, 0x00000001 },
3785 { 0x2804, 0, 0x00000000, 0x00003f01 },
3786 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3787 { 0x2810, 0, 0xffff0000, 0x00000000 },
3788 { 0x2814, 0, 0xffff0000, 0x00000000 },
3789 { 0x2818, 0, 0xffff0000, 0x00000000 },
3790 { 0x281c, 0, 0xffff0000, 0x00000000 },
3791 { 0x2834, 0, 0xffffffff, 0x00000000 },
3792 { 0x2840, 0, 0x00000000, 0xffffffff },
3793 { 0x2844, 0, 0x00000000, 0xffffffff },
3794 { 0x2848, 0, 0xffffffff, 0x00000000 },
3795 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3797 { 0x2c00, 0, 0x00000000, 0x00000011 },
3798 { 0x2c04, 0, 0x00000000, 0x00030007 },
3800 { 0x3c00, 0, 0x00000000, 0x00000001 },
3801 { 0x3c04, 0, 0x00000000, 0x00070000 },
3802 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3803 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3804 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3805 { 0x3c14, 0, 0x00000000, 0xffffffff },
3806 { 0x3c18, 0, 0x00000000, 0xffffffff },
3807 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3808 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3810 { 0x5004, 0, 0x00000000, 0x0000007f },
3811 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3812 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3814 { 0x5c00, 0, 0x00000000, 0x00000001 },
3815 { 0x5c04, 0, 0x00000000, 0x0003000f },
3816 { 0x5c08, 0, 0x00000003, 0x00000000 },
3817 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3818 { 0x5c10, 0, 0x00000000, 0xffffffff },
3819 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3820 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3821 { 0x5c88, 0, 0x00000000, 0x00077373 },
3822 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3824 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3825 { 0x680c, 0, 0xffffffff, 0x00000000 },
3826 { 0x6810, 0, 0xffffffff, 0x00000000 },
3827 { 0x6814, 0, 0xffffffff, 0x00000000 },
3828 { 0x6818, 0, 0xffffffff, 0x00000000 },
3829 { 0x681c, 0, 0xffffffff, 0x00000000 },
3830 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3831 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3832 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3833 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3834 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3835 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3836 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3837 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3838 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3839 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3840 { 0x684c, 0, 0xffffffff, 0x00000000 },
3841 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3842 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3843 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3844 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3845 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3846 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3848 { 0xffff, 0, 0x00000000, 0x00000000 },
3852 for (i
= 0; reg_tbl
[i
].offset
!= 0xffff; i
++) {
3853 u32 offset
, rw_mask
, ro_mask
, save_val
, val
;
3855 offset
= (u32
) reg_tbl
[i
].offset
;
3856 rw_mask
= reg_tbl
[i
].rw_mask
;
3857 ro_mask
= reg_tbl
[i
].ro_mask
;
3859 save_val
= readl(bp
->regview
+ offset
);
3861 writel(0, bp
->regview
+ offset
);
3863 val
= readl(bp
->regview
+ offset
);
3864 if ((val
& rw_mask
) != 0) {
3868 if ((val
& ro_mask
) != (save_val
& ro_mask
)) {
3872 writel(0xffffffff, bp
->regview
+ offset
);
3874 val
= readl(bp
->regview
+ offset
);
3875 if ((val
& rw_mask
) != rw_mask
) {
3879 if ((val
& ro_mask
) != (save_val
& ro_mask
)) {
3883 writel(save_val
, bp
->regview
+ offset
);
3887 writel(save_val
, bp
->regview
+ offset
);
3895 bnx2_do_mem_test(struct bnx2
*bp
, u32 start
, u32 size
)
3897 static const u32 test_pattern
[] = { 0x00000000, 0xffffffff, 0x55555555,
3898 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3901 for (i
= 0; i
< sizeof(test_pattern
) / 4; i
++) {
3904 for (offset
= 0; offset
< size
; offset
+= 4) {
3906 REG_WR_IND(bp
, start
+ offset
, test_pattern
[i
]);
3908 if (REG_RD_IND(bp
, start
+ offset
) !=
3918 bnx2_test_memory(struct bnx2
*bp
)
3922 static const struct {
3926 { 0x60000, 0x4000 },
3927 { 0xa0000, 0x3000 },
3928 { 0xe0000, 0x4000 },
3929 { 0x120000, 0x4000 },
3930 { 0x1a0000, 0x4000 },
3931 { 0x160000, 0x4000 },
3935 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++) {
3936 if ((ret
= bnx2_do_mem_test(bp
, mem_tbl
[i
].offset
,
3937 mem_tbl
[i
].len
)) != 0) {
3945 #define BNX2_MAC_LOOPBACK 0
3946 #define BNX2_PHY_LOOPBACK 1
3949 bnx2_run_loopback(struct bnx2
*bp
, int loopback_mode
)
3951 unsigned int pkt_size
, num_pkts
, i
;
3952 struct sk_buff
*skb
, *rx_skb
;
3953 unsigned char *packet
;
3954 u16 rx_start_idx
, rx_idx
;
3957 struct sw_bd
*rx_buf
;
3958 struct l2_fhdr
*rx_hdr
;
3961 if (loopback_mode
== BNX2_MAC_LOOPBACK
) {
3962 bp
->loopback
= MAC_LOOPBACK
;
3963 bnx2_set_mac_loopback(bp
);
3965 else if (loopback_mode
== BNX2_PHY_LOOPBACK
) {
3966 bp
->loopback
= PHY_LOOPBACK
;
3967 bnx2_set_phy_loopback(bp
);
3973 skb
= netdev_alloc_skb(bp
->dev
, pkt_size
);
3976 packet
= skb_put(skb
, pkt_size
);
3977 memcpy(packet
, bp
->mac_addr
, 6);
3978 memset(packet
+ 6, 0x0, 8);
3979 for (i
= 14; i
< pkt_size
; i
++)
3980 packet
[i
] = (unsigned char) (i
& 0xff);
3982 map
= pci_map_single(bp
->pdev
, skb
->data
, pkt_size
,
3985 REG_WR(bp
, BNX2_HC_COMMAND
,
3986 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
3988 REG_RD(bp
, BNX2_HC_COMMAND
);
3991 rx_start_idx
= bp
->status_blk
->status_rx_quick_consumer_index0
;
3995 txbd
= &bp
->tx_desc_ring
[TX_RING_IDX(bp
->tx_prod
)];
3997 txbd
->tx_bd_haddr_hi
= (u64
) map
>> 32;
3998 txbd
->tx_bd_haddr_lo
= (u64
) map
& 0xffffffff;
3999 txbd
->tx_bd_mss_nbytes
= pkt_size
;
4000 txbd
->tx_bd_vlan_tag_flags
= TX_BD_FLAGS_START
| TX_BD_FLAGS_END
;
4003 bp
->tx_prod
= NEXT_TX_BD(bp
->tx_prod
);
4004 bp
->tx_prod_bseq
+= pkt_size
;
4006 REG_WR16(bp
, MB_TX_CID_ADDR
+ BNX2_L2CTX_TX_HOST_BIDX
, bp
->tx_prod
);
4007 REG_WR(bp
, MB_TX_CID_ADDR
+ BNX2_L2CTX_TX_HOST_BSEQ
, bp
->tx_prod_bseq
);
4011 REG_WR(bp
, BNX2_HC_COMMAND
,
4012 bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW_WO_INT
);
4014 REG_RD(bp
, BNX2_HC_COMMAND
);
4018 pci_unmap_single(bp
->pdev
, map
, pkt_size
, PCI_DMA_TODEVICE
);
4021 if (bp
->status_blk
->status_tx_quick_consumer_index0
!= bp
->tx_prod
) {
4022 goto loopback_test_done
;
4025 rx_idx
= bp
->status_blk
->status_rx_quick_consumer_index0
;
4026 if (rx_idx
!= rx_start_idx
+ num_pkts
) {
4027 goto loopback_test_done
;
4030 rx_buf
= &bp
->rx_buf_ring
[rx_start_idx
];
4031 rx_skb
= rx_buf
->skb
;
4033 rx_hdr
= (struct l2_fhdr
*) rx_skb
->data
;
4034 skb_reserve(rx_skb
, bp
->rx_offset
);
4036 pci_dma_sync_single_for_cpu(bp
->pdev
,
4037 pci_unmap_addr(rx_buf
, mapping
),
4038 bp
->rx_buf_size
, PCI_DMA_FROMDEVICE
);
4040 if (rx_hdr
->l2_fhdr_status
&
4041 (L2_FHDR_ERRORS_BAD_CRC
|
4042 L2_FHDR_ERRORS_PHY_DECODE
|
4043 L2_FHDR_ERRORS_ALIGNMENT
|
4044 L2_FHDR_ERRORS_TOO_SHORT
|
4045 L2_FHDR_ERRORS_GIANT_FRAME
)) {
4047 goto loopback_test_done
;
4050 if ((rx_hdr
->l2_fhdr_pkt_len
- 4) != pkt_size
) {
4051 goto loopback_test_done
;
4054 for (i
= 14; i
< pkt_size
; i
++) {
4055 if (*(rx_skb
->data
+ i
) != (unsigned char) (i
& 0xff)) {
4056 goto loopback_test_done
;
4067 #define BNX2_MAC_LOOPBACK_FAILED 1
4068 #define BNX2_PHY_LOOPBACK_FAILED 2
4069 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4070 BNX2_PHY_LOOPBACK_FAILED)
4073 bnx2_test_loopback(struct bnx2
*bp
)
4077 if (!netif_running(bp
->dev
))
4078 return BNX2_LOOPBACK_FAILED
;
4080 bnx2_reset_nic(bp
, BNX2_DRV_MSG_CODE_RESET
);
4081 spin_lock_bh(&bp
->phy_lock
);
4083 spin_unlock_bh(&bp
->phy_lock
);
4084 if (bnx2_run_loopback(bp
, BNX2_MAC_LOOPBACK
))
4085 rc
|= BNX2_MAC_LOOPBACK_FAILED
;
4086 if (bnx2_run_loopback(bp
, BNX2_PHY_LOOPBACK
))
4087 rc
|= BNX2_PHY_LOOPBACK_FAILED
;
4091 #define NVRAM_SIZE 0x200
4092 #define CRC32_RESIDUAL 0xdebb20e3
4095 bnx2_test_nvram(struct bnx2
*bp
)
4097 u32 buf
[NVRAM_SIZE
/ 4];
4098 u8
*data
= (u8
*) buf
;
4102 if ((rc
= bnx2_nvram_read(bp
, 0, data
, 4)) != 0)
4103 goto test_nvram_done
;
4105 magic
= be32_to_cpu(buf
[0]);
4106 if (magic
!= 0x669955aa) {
4108 goto test_nvram_done
;
4111 if ((rc
= bnx2_nvram_read(bp
, 0x100, data
, NVRAM_SIZE
)) != 0)
4112 goto test_nvram_done
;
4114 csum
= ether_crc_le(0x100, data
);
4115 if (csum
!= CRC32_RESIDUAL
) {
4117 goto test_nvram_done
;
4120 csum
= ether_crc_le(0x100, data
+ 0x100);
4121 if (csum
!= CRC32_RESIDUAL
) {
4130 bnx2_test_link(struct bnx2
*bp
)
4134 spin_lock_bh(&bp
->phy_lock
);
4135 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
4136 bnx2_read_phy(bp
, MII_BMSR
, &bmsr
);
4137 spin_unlock_bh(&bp
->phy_lock
);
4139 if (bmsr
& BMSR_LSTATUS
) {
4146 bnx2_test_intr(struct bnx2
*bp
)
4151 if (!netif_running(bp
->dev
))
4154 status_idx
= REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
) & 0xffff;
4156 /* This register is not touched during run-time. */
4157 REG_WR(bp
, BNX2_HC_COMMAND
, bp
->hc_cmd
| BNX2_HC_COMMAND_COAL_NOW
);
4158 REG_RD(bp
, BNX2_HC_COMMAND
);
4160 for (i
= 0; i
< 10; i
++) {
4161 if ((REG_RD(bp
, BNX2_PCICFG_INT_ACK_CMD
) & 0xffff) !=
4167 msleep_interruptible(10);
4176 bnx2_timer(unsigned long data
)
4178 struct bnx2
*bp
= (struct bnx2
*) data
;
4181 if (!netif_running(bp
->dev
))
4184 if (atomic_read(&bp
->intr_sem
) != 0)
4185 goto bnx2_restart_timer
;
4187 msg
= (u32
) ++bp
->fw_drv_pulse_wr_seq
;
4188 REG_WR_IND(bp
, bp
->shmem_base
+ BNX2_DRV_PULSE_MB
, msg
);
4190 bp
->stats_blk
->stat_FwRxDrop
= REG_RD_IND(bp
, BNX2_FW_RX_DROP_COUNT
);
4192 if ((bp
->phy_flags
& PHY_SERDES_FLAG
) &&
4193 (CHIP_NUM(bp
) == CHIP_NUM_5706
)) {
4195 spin_lock(&bp
->phy_lock
);
4196 if (bp
->serdes_an_pending
) {
4197 bp
->serdes_an_pending
--;
4199 else if ((bp
->link_up
== 0) && (bp
->autoneg
& AUTONEG_SPEED
)) {
4202 bp
->current_interval
= bp
->timer_interval
;
4204 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
4206 if (bmcr
& BMCR_ANENABLE
) {
4209 bnx2_write_phy(bp
, 0x1c, 0x7c00);
4210 bnx2_read_phy(bp
, 0x1c, &phy1
);
4212 bnx2_write_phy(bp
, 0x17, 0x0f01);
4213 bnx2_read_phy(bp
, 0x15, &phy2
);
4214 bnx2_write_phy(bp
, 0x17, 0x0f01);
4215 bnx2_read_phy(bp
, 0x15, &phy2
);
4217 if ((phy1
& 0x10) && /* SIGNAL DETECT */
4218 !(phy2
& 0x20)) { /* no CONFIG */
4220 bmcr
&= ~BMCR_ANENABLE
;
4221 bmcr
|= BMCR_SPEED1000
|
4223 bnx2_write_phy(bp
, MII_BMCR
, bmcr
);
4225 PHY_PARALLEL_DETECT_FLAG
;
4229 else if ((bp
->link_up
) && (bp
->autoneg
& AUTONEG_SPEED
) &&
4230 (bp
->phy_flags
& PHY_PARALLEL_DETECT_FLAG
)) {
4233 bnx2_write_phy(bp
, 0x17, 0x0f01);
4234 bnx2_read_phy(bp
, 0x15, &phy2
);
4238 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
4239 bmcr
|= BMCR_ANENABLE
;
4240 bnx2_write_phy(bp
, MII_BMCR
, bmcr
);
4242 bp
->phy_flags
&= ~PHY_PARALLEL_DETECT_FLAG
;
4247 bp
->current_interval
= bp
->timer_interval
;
4249 spin_unlock(&bp
->phy_lock
);
4253 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
4256 /* Called with rtnl_lock */
4258 bnx2_open(struct net_device
*dev
)
4260 struct bnx2
*bp
= netdev_priv(dev
);
4263 bnx2_set_power_state(bp
, PCI_D0
);
4264 bnx2_disable_int(bp
);
4266 rc
= bnx2_alloc_mem(bp
);
4270 if ((CHIP_ID(bp
) != CHIP_ID_5706_A0
) &&
4271 (CHIP_ID(bp
) != CHIP_ID_5706_A1
) &&
4274 if (pci_enable_msi(bp
->pdev
) == 0) {
4275 bp
->flags
|= USING_MSI_FLAG
;
4276 rc
= request_irq(bp
->pdev
->irq
, bnx2_msi
, 0, dev
->name
,
4280 rc
= request_irq(bp
->pdev
->irq
, bnx2_interrupt
,
4281 IRQF_SHARED
, dev
->name
, dev
);
4285 rc
= request_irq(bp
->pdev
->irq
, bnx2_interrupt
, IRQF_SHARED
,
4293 rc
= bnx2_init_nic(bp
);
4296 free_irq(bp
->pdev
->irq
, dev
);
4297 if (bp
->flags
& USING_MSI_FLAG
) {
4298 pci_disable_msi(bp
->pdev
);
4299 bp
->flags
&= ~USING_MSI_FLAG
;
4306 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
4308 atomic_set(&bp
->intr_sem
, 0);
4310 bnx2_enable_int(bp
);
4312 if (bp
->flags
& USING_MSI_FLAG
) {
4313 /* Test MSI to make sure it is working
4314 * If MSI test fails, go back to INTx mode
4316 if (bnx2_test_intr(bp
) != 0) {
4317 printk(KERN_WARNING PFX
"%s: No interrupt was generated"
4318 " using MSI, switching to INTx mode. Please"
4319 " report this failure to the PCI maintainer"
4320 " and include system chipset information.\n",
4323 bnx2_disable_int(bp
);
4324 free_irq(bp
->pdev
->irq
, dev
);
4325 pci_disable_msi(bp
->pdev
);
4326 bp
->flags
&= ~USING_MSI_FLAG
;
4328 rc
= bnx2_init_nic(bp
);
4331 rc
= request_irq(bp
->pdev
->irq
, bnx2_interrupt
,
4332 IRQF_SHARED
, dev
->name
, dev
);
4337 del_timer_sync(&bp
->timer
);
4340 bnx2_enable_int(bp
);
4343 if (bp
->flags
& USING_MSI_FLAG
) {
4344 printk(KERN_INFO PFX
"%s: using MSI\n", dev
->name
);
4347 netif_start_queue(dev
);
4353 bnx2_reset_task(void *data
)
4355 struct bnx2
*bp
= data
;
4357 if (!netif_running(bp
->dev
))
4360 bp
->in_reset_task
= 1;
4361 bnx2_netif_stop(bp
);
4365 atomic_set(&bp
->intr_sem
, 1);
4366 bnx2_netif_start(bp
);
4367 bp
->in_reset_task
= 0;
4371 bnx2_tx_timeout(struct net_device
*dev
)
4373 struct bnx2
*bp
= netdev_priv(dev
);
4375 /* This allows the netif to be shutdown gracefully before resetting */
4376 schedule_work(&bp
->reset_task
);
4380 /* Called with rtnl_lock */
4382 bnx2_vlan_rx_register(struct net_device
*dev
, struct vlan_group
*vlgrp
)
4384 struct bnx2
*bp
= netdev_priv(dev
);
4386 bnx2_netif_stop(bp
);
4389 bnx2_set_rx_mode(dev
);
4391 bnx2_netif_start(bp
);
4394 /* Called with rtnl_lock */
4396 bnx2_vlan_rx_kill_vid(struct net_device
*dev
, uint16_t vid
)
4398 struct bnx2
*bp
= netdev_priv(dev
);
4400 bnx2_netif_stop(bp
);
4403 bp
->vlgrp
->vlan_devices
[vid
] = NULL
;
4404 bnx2_set_rx_mode(dev
);
4406 bnx2_netif_start(bp
);
4410 /* Called with netif_tx_lock.
4411 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4412 * netif_wake_queue().
4415 bnx2_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
4417 struct bnx2
*bp
= netdev_priv(dev
);
4420 struct sw_bd
*tx_buf
;
4421 u32 len
, vlan_tag_flags
, last_frag
, mss
;
4422 u16 prod
, ring_prod
;
4425 if (unlikely(bnx2_tx_avail(bp
) < (skb_shinfo(skb
)->nr_frags
+ 1))) {
4426 netif_stop_queue(dev
);
4427 printk(KERN_ERR PFX
"%s: BUG! Tx ring full when queue awake!\n",
4430 return NETDEV_TX_BUSY
;
4432 len
= skb_headlen(skb
);
4434 ring_prod
= TX_RING_IDX(prod
);
4437 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
4438 vlan_tag_flags
|= TX_BD_FLAGS_TCP_UDP_CKSUM
;
4441 if (bp
->vlgrp
!= 0 && vlan_tx_tag_present(skb
)) {
4443 (TX_BD_FLAGS_VLAN_TAG
| (vlan_tx_tag_get(skb
) << 16));
4446 if ((mss
= skb_shinfo(skb
)->gso_size
) &&
4447 (skb
->len
> (bp
->dev
->mtu
+ ETH_HLEN
))) {
4448 u32 tcp_opt_len
, ip_tcp_len
;
4450 if (skb_header_cloned(skb
) &&
4451 pskb_expand_head(skb
, 0, 0, GFP_ATOMIC
)) {
4453 return NETDEV_TX_OK
;
4456 tcp_opt_len
= ((skb
->h
.th
->doff
- 5) * 4);
4457 vlan_tag_flags
|= TX_BD_FLAGS_SW_LSO
;
4460 if (skb
->h
.th
->doff
> 5) {
4461 tcp_opt_len
= (skb
->h
.th
->doff
- 5) << 2;
4463 ip_tcp_len
= (skb
->nh
.iph
->ihl
<< 2) + sizeof(struct tcphdr
);
4465 skb
->nh
.iph
->check
= 0;
4466 skb
->nh
.iph
->tot_len
= htons(mss
+ ip_tcp_len
+ tcp_opt_len
);
4468 ~csum_tcpudp_magic(skb
->nh
.iph
->saddr
,
4472 if (tcp_opt_len
|| (skb
->nh
.iph
->ihl
> 5)) {
4473 vlan_tag_flags
|= ((skb
->nh
.iph
->ihl
- 5) +
4474 (tcp_opt_len
>> 2)) << 8;
4483 mapping
= pci_map_single(bp
->pdev
, skb
->data
, len
, PCI_DMA_TODEVICE
);
4485 tx_buf
= &bp
->tx_buf_ring
[ring_prod
];
4487 pci_unmap_addr_set(tx_buf
, mapping
, mapping
);
4489 txbd
= &bp
->tx_desc_ring
[ring_prod
];
4491 txbd
->tx_bd_haddr_hi
= (u64
) mapping
>> 32;
4492 txbd
->tx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
4493 txbd
->tx_bd_mss_nbytes
= len
| (mss
<< 16);
4494 txbd
->tx_bd_vlan_tag_flags
= vlan_tag_flags
| TX_BD_FLAGS_START
;
4496 last_frag
= skb_shinfo(skb
)->nr_frags
;
4498 for (i
= 0; i
< last_frag
; i
++) {
4499 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4501 prod
= NEXT_TX_BD(prod
);
4502 ring_prod
= TX_RING_IDX(prod
);
4503 txbd
= &bp
->tx_desc_ring
[ring_prod
];
4506 mapping
= pci_map_page(bp
->pdev
, frag
->page
, frag
->page_offset
,
4507 len
, PCI_DMA_TODEVICE
);
4508 pci_unmap_addr_set(&bp
->tx_buf_ring
[ring_prod
],
4511 txbd
->tx_bd_haddr_hi
= (u64
) mapping
>> 32;
4512 txbd
->tx_bd_haddr_lo
= (u64
) mapping
& 0xffffffff;
4513 txbd
->tx_bd_mss_nbytes
= len
| (mss
<< 16);
4514 txbd
->tx_bd_vlan_tag_flags
= vlan_tag_flags
;
4517 txbd
->tx_bd_vlan_tag_flags
|= TX_BD_FLAGS_END
;
4519 prod
= NEXT_TX_BD(prod
);
4520 bp
->tx_prod_bseq
+= skb
->len
;
4522 REG_WR16(bp
, MB_TX_CID_ADDR
+ BNX2_L2CTX_TX_HOST_BIDX
, prod
);
4523 REG_WR(bp
, MB_TX_CID_ADDR
+ BNX2_L2CTX_TX_HOST_BSEQ
, bp
->tx_prod_bseq
);
4528 dev
->trans_start
= jiffies
;
4530 if (unlikely(bnx2_tx_avail(bp
) <= MAX_SKB_FRAGS
)) {
4531 netif_stop_queue(dev
);
4532 if (bnx2_tx_avail(bp
) > bp
->tx_wake_thresh
)
4533 netif_wake_queue(dev
);
4536 return NETDEV_TX_OK
;
4539 /* Called with rtnl_lock */
4541 bnx2_close(struct net_device
*dev
)
4543 struct bnx2
*bp
= netdev_priv(dev
);
4546 /* Calling flush_scheduled_work() may deadlock because
4547 * linkwatch_event() may be on the workqueue and it will try to get
4548 * the rtnl_lock which we are holding.
4550 while (bp
->in_reset_task
)
4553 bnx2_netif_stop(bp
);
4554 del_timer_sync(&bp
->timer
);
4555 if (bp
->flags
& NO_WOL_FLAG
)
4556 reset_code
= BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN
;
4558 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_WOL
;
4560 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL
;
4561 bnx2_reset_chip(bp
, reset_code
);
4562 free_irq(bp
->pdev
->irq
, dev
);
4563 if (bp
->flags
& USING_MSI_FLAG
) {
4564 pci_disable_msi(bp
->pdev
);
4565 bp
->flags
&= ~USING_MSI_FLAG
;
4570 netif_carrier_off(bp
->dev
);
4571 bnx2_set_power_state(bp
, PCI_D3hot
);
4575 #define GET_NET_STATS64(ctr) \
4576 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4577 (unsigned long) (ctr##_lo)
4579 #define GET_NET_STATS32(ctr) \
4582 #if (BITS_PER_LONG == 64)
4583 #define GET_NET_STATS GET_NET_STATS64
4585 #define GET_NET_STATS GET_NET_STATS32
4588 static struct net_device_stats
*
4589 bnx2_get_stats(struct net_device
*dev
)
4591 struct bnx2
*bp
= netdev_priv(dev
);
4592 struct statistics_block
*stats_blk
= bp
->stats_blk
;
4593 struct net_device_stats
*net_stats
= &bp
->net_stats
;
4595 if (bp
->stats_blk
== NULL
) {
4598 net_stats
->rx_packets
=
4599 GET_NET_STATS(stats_blk
->stat_IfHCInUcastPkts
) +
4600 GET_NET_STATS(stats_blk
->stat_IfHCInMulticastPkts
) +
4601 GET_NET_STATS(stats_blk
->stat_IfHCInBroadcastPkts
);
4603 net_stats
->tx_packets
=
4604 GET_NET_STATS(stats_blk
->stat_IfHCOutUcastPkts
) +
4605 GET_NET_STATS(stats_blk
->stat_IfHCOutMulticastPkts
) +
4606 GET_NET_STATS(stats_blk
->stat_IfHCOutBroadcastPkts
);
4608 net_stats
->rx_bytes
=
4609 GET_NET_STATS(stats_blk
->stat_IfHCInOctets
);
4611 net_stats
->tx_bytes
=
4612 GET_NET_STATS(stats_blk
->stat_IfHCOutOctets
);
4614 net_stats
->multicast
=
4615 GET_NET_STATS(stats_blk
->stat_IfHCOutMulticastPkts
);
4617 net_stats
->collisions
=
4618 (unsigned long) stats_blk
->stat_EtherStatsCollisions
;
4620 net_stats
->rx_length_errors
=
4621 (unsigned long) (stats_blk
->stat_EtherStatsUndersizePkts
+
4622 stats_blk
->stat_EtherStatsOverrsizePkts
);
4624 net_stats
->rx_over_errors
=
4625 (unsigned long) stats_blk
->stat_IfInMBUFDiscards
;
4627 net_stats
->rx_frame_errors
=
4628 (unsigned long) stats_blk
->stat_Dot3StatsAlignmentErrors
;
4630 net_stats
->rx_crc_errors
=
4631 (unsigned long) stats_blk
->stat_Dot3StatsFCSErrors
;
4633 net_stats
->rx_errors
= net_stats
->rx_length_errors
+
4634 net_stats
->rx_over_errors
+ net_stats
->rx_frame_errors
+
4635 net_stats
->rx_crc_errors
;
4637 net_stats
->tx_aborted_errors
=
4638 (unsigned long) (stats_blk
->stat_Dot3StatsExcessiveCollisions
+
4639 stats_blk
->stat_Dot3StatsLateCollisions
);
4641 if ((CHIP_NUM(bp
) == CHIP_NUM_5706
) ||
4642 (CHIP_ID(bp
) == CHIP_ID_5708_A0
))
4643 net_stats
->tx_carrier_errors
= 0;
4645 net_stats
->tx_carrier_errors
=
4647 stats_blk
->stat_Dot3StatsCarrierSenseErrors
;
4650 net_stats
->tx_errors
=
4652 stats_blk
->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4654 net_stats
->tx_aborted_errors
+
4655 net_stats
->tx_carrier_errors
;
4657 net_stats
->rx_missed_errors
=
4658 (unsigned long) (stats_blk
->stat_IfInMBUFDiscards
+
4659 stats_blk
->stat_FwRxDrop
);
4664 /* All ethtool functions called with rtnl_lock */
4667 bnx2_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
4669 struct bnx2
*bp
= netdev_priv(dev
);
4671 cmd
->supported
= SUPPORTED_Autoneg
;
4672 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
4673 cmd
->supported
|= SUPPORTED_1000baseT_Full
|
4676 cmd
->port
= PORT_FIBRE
;
4679 cmd
->supported
|= SUPPORTED_10baseT_Half
|
4680 SUPPORTED_10baseT_Full
|
4681 SUPPORTED_100baseT_Half
|
4682 SUPPORTED_100baseT_Full
|
4683 SUPPORTED_1000baseT_Full
|
4686 cmd
->port
= PORT_TP
;
4689 cmd
->advertising
= bp
->advertising
;
4691 if (bp
->autoneg
& AUTONEG_SPEED
) {
4692 cmd
->autoneg
= AUTONEG_ENABLE
;
4695 cmd
->autoneg
= AUTONEG_DISABLE
;
4698 if (netif_carrier_ok(dev
)) {
4699 cmd
->speed
= bp
->line_speed
;
4700 cmd
->duplex
= bp
->duplex
;
4707 cmd
->transceiver
= XCVR_INTERNAL
;
4708 cmd
->phy_address
= bp
->phy_addr
;
4714 bnx2_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
4716 struct bnx2
*bp
= netdev_priv(dev
);
4717 u8 autoneg
= bp
->autoneg
;
4718 u8 req_duplex
= bp
->req_duplex
;
4719 u16 req_line_speed
= bp
->req_line_speed
;
4720 u32 advertising
= bp
->advertising
;
4722 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
4723 autoneg
|= AUTONEG_SPEED
;
4725 cmd
->advertising
&= ETHTOOL_ALL_COPPER_SPEED
;
4727 /* allow advertising 1 speed */
4728 if ((cmd
->advertising
== ADVERTISED_10baseT_Half
) ||
4729 (cmd
->advertising
== ADVERTISED_10baseT_Full
) ||
4730 (cmd
->advertising
== ADVERTISED_100baseT_Half
) ||
4731 (cmd
->advertising
== ADVERTISED_100baseT_Full
)) {
4733 if (bp
->phy_flags
& PHY_SERDES_FLAG
)
4736 advertising
= cmd
->advertising
;
4739 else if (cmd
->advertising
== ADVERTISED_1000baseT_Full
) {
4740 advertising
= cmd
->advertising
;
4742 else if (cmd
->advertising
== ADVERTISED_1000baseT_Half
) {
4746 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
4747 advertising
= ETHTOOL_ALL_FIBRE_SPEED
;
4750 advertising
= ETHTOOL_ALL_COPPER_SPEED
;
4753 advertising
|= ADVERTISED_Autoneg
;
4756 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
4757 if ((cmd
->speed
!= SPEED_1000
&&
4758 cmd
->speed
!= SPEED_2500
) ||
4759 (cmd
->duplex
!= DUPLEX_FULL
))
4762 if (cmd
->speed
== SPEED_2500
&&
4763 !(bp
->phy_flags
& PHY_2_5G_CAPABLE_FLAG
))
4766 else if (cmd
->speed
== SPEED_1000
) {
4769 autoneg
&= ~AUTONEG_SPEED
;
4770 req_line_speed
= cmd
->speed
;
4771 req_duplex
= cmd
->duplex
;
4775 bp
->autoneg
= autoneg
;
4776 bp
->advertising
= advertising
;
4777 bp
->req_line_speed
= req_line_speed
;
4778 bp
->req_duplex
= req_duplex
;
4780 spin_lock_bh(&bp
->phy_lock
);
4784 spin_unlock_bh(&bp
->phy_lock
);
4790 bnx2_get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
4792 struct bnx2
*bp
= netdev_priv(dev
);
4794 strcpy(info
->driver
, DRV_MODULE_NAME
);
4795 strcpy(info
->version
, DRV_MODULE_VERSION
);
4796 strcpy(info
->bus_info
, pci_name(bp
->pdev
));
4797 info
->fw_version
[0] = ((bp
->fw_ver
& 0xff000000) >> 24) + '0';
4798 info
->fw_version
[2] = ((bp
->fw_ver
& 0xff0000) >> 16) + '0';
4799 info
->fw_version
[4] = ((bp
->fw_ver
& 0xff00) >> 8) + '0';
4800 info
->fw_version
[1] = info
->fw_version
[3] = '.';
4801 info
->fw_version
[5] = 0;
#define BNX2_REGDUMP_LEN (32 * 1024)

/* ethtool get_regs_len: size of the buffer bnx2_get_regs() fills. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4813 bnx2_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
, void *_p
)
4815 u32
*p
= _p
, i
, offset
;
4817 struct bnx2
*bp
= netdev_priv(dev
);
4818 u32 reg_boundaries
[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4819 0x0800, 0x0880, 0x0c00, 0x0c10,
4820 0x0c30, 0x0d08, 0x1000, 0x101c,
4821 0x1040, 0x1048, 0x1080, 0x10a4,
4822 0x1400, 0x1490, 0x1498, 0x14f0,
4823 0x1500, 0x155c, 0x1580, 0x15dc,
4824 0x1600, 0x1658, 0x1680, 0x16d8,
4825 0x1800, 0x1820, 0x1840, 0x1854,
4826 0x1880, 0x1894, 0x1900, 0x1984,
4827 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4828 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4829 0x2000, 0x2030, 0x23c0, 0x2400,
4830 0x2800, 0x2820, 0x2830, 0x2850,
4831 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4832 0x3c00, 0x3c94, 0x4000, 0x4010,
4833 0x4080, 0x4090, 0x43c0, 0x4458,
4834 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4835 0x4fc0, 0x5010, 0x53c0, 0x5444,
4836 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4837 0x5fc0, 0x6000, 0x6400, 0x6428,
4838 0x6800, 0x6848, 0x684c, 0x6860,
4839 0x6888, 0x6910, 0x8000 };
4843 memset(p
, 0, BNX2_REGDUMP_LEN
);
4845 if (!netif_running(bp
->dev
))
4849 offset
= reg_boundaries
[0];
4851 while (offset
< BNX2_REGDUMP_LEN
) {
4852 *p
++ = REG_RD(bp
, offset
);
4854 if (offset
== reg_boundaries
[i
+ 1]) {
4855 offset
= reg_boundaries
[i
+ 2];
4856 p
= (u32
*) (orig_p
+ offset
);
4863 bnx2_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
4865 struct bnx2
*bp
= netdev_priv(dev
);
4867 if (bp
->flags
& NO_WOL_FLAG
) {
4872 wol
->supported
= WAKE_MAGIC
;
4874 wol
->wolopts
= WAKE_MAGIC
;
4878 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
4882 bnx2_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
4884 struct bnx2
*bp
= netdev_priv(dev
);
4886 if (wol
->wolopts
& ~WAKE_MAGIC
)
4889 if (wol
->wolopts
& WAKE_MAGIC
) {
4890 if (bp
->flags
& NO_WOL_FLAG
)
4902 bnx2_nway_reset(struct net_device
*dev
)
4904 struct bnx2
*bp
= netdev_priv(dev
);
4907 if (!(bp
->autoneg
& AUTONEG_SPEED
)) {
4911 spin_lock_bh(&bp
->phy_lock
);
4913 /* Force a link down visible on the other side */
4914 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
4915 bnx2_write_phy(bp
, MII_BMCR
, BMCR_LOOPBACK
);
4916 spin_unlock_bh(&bp
->phy_lock
);
4920 spin_lock_bh(&bp
->phy_lock
);
4921 if (CHIP_NUM(bp
) == CHIP_NUM_5706
) {
4922 bp
->current_interval
= SERDES_AN_TIMEOUT
;
4923 bp
->serdes_an_pending
= 1;
4924 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
4928 bnx2_read_phy(bp
, MII_BMCR
, &bmcr
);
4929 bmcr
&= ~BMCR_LOOPBACK
;
4930 bnx2_write_phy(bp
, MII_BMCR
, bmcr
| BMCR_ANRESTART
| BMCR_ANENABLE
);
4932 spin_unlock_bh(&bp
->phy_lock
);
4938 bnx2_get_eeprom_len(struct net_device
*dev
)
4940 struct bnx2
*bp
= netdev_priv(dev
);
4942 if (bp
->flash_info
== NULL
)
4945 return (int) bp
->flash_size
;
4949 bnx2_get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
4952 struct bnx2
*bp
= netdev_priv(dev
);
4955 /* parameters already validated in ethtool_get_eeprom */
4957 rc
= bnx2_nvram_read(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
4963 bnx2_set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
4966 struct bnx2
*bp
= netdev_priv(dev
);
4969 /* parameters already validated in ethtool_set_eeprom */
4971 rc
= bnx2_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
4977 bnx2_get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*coal
)
4979 struct bnx2
*bp
= netdev_priv(dev
);
4981 memset(coal
, 0, sizeof(struct ethtool_coalesce
));
4983 coal
->rx_coalesce_usecs
= bp
->rx_ticks
;
4984 coal
->rx_max_coalesced_frames
= bp
->rx_quick_cons_trip
;
4985 coal
->rx_coalesce_usecs_irq
= bp
->rx_ticks_int
;
4986 coal
->rx_max_coalesced_frames_irq
= bp
->rx_quick_cons_trip_int
;
4988 coal
->tx_coalesce_usecs
= bp
->tx_ticks
;
4989 coal
->tx_max_coalesced_frames
= bp
->tx_quick_cons_trip
;
4990 coal
->tx_coalesce_usecs_irq
= bp
->tx_ticks_int
;
4991 coal
->tx_max_coalesced_frames_irq
= bp
->tx_quick_cons_trip_int
;
4993 coal
->stats_block_coalesce_usecs
= bp
->stats_ticks
;
4999 bnx2_set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*coal
)
5001 struct bnx2
*bp
= netdev_priv(dev
);
5003 bp
->rx_ticks
= (u16
) coal
->rx_coalesce_usecs
;
5004 if (bp
->rx_ticks
> 0x3ff) bp
->rx_ticks
= 0x3ff;
5006 bp
->rx_quick_cons_trip
= (u16
) coal
->rx_max_coalesced_frames
;
5007 if (bp
->rx_quick_cons_trip
> 0xff) bp
->rx_quick_cons_trip
= 0xff;
5009 bp
->rx_ticks_int
= (u16
) coal
->rx_coalesce_usecs_irq
;
5010 if (bp
->rx_ticks_int
> 0x3ff) bp
->rx_ticks_int
= 0x3ff;
5012 bp
->rx_quick_cons_trip_int
= (u16
) coal
->rx_max_coalesced_frames_irq
;
5013 if (bp
->rx_quick_cons_trip_int
> 0xff)
5014 bp
->rx_quick_cons_trip_int
= 0xff;
5016 bp
->tx_ticks
= (u16
) coal
->tx_coalesce_usecs
;
5017 if (bp
->tx_ticks
> 0x3ff) bp
->tx_ticks
= 0x3ff;
5019 bp
->tx_quick_cons_trip
= (u16
) coal
->tx_max_coalesced_frames
;
5020 if (bp
->tx_quick_cons_trip
> 0xff) bp
->tx_quick_cons_trip
= 0xff;
5022 bp
->tx_ticks_int
= (u16
) coal
->tx_coalesce_usecs_irq
;
5023 if (bp
->tx_ticks_int
> 0x3ff) bp
->tx_ticks_int
= 0x3ff;
5025 bp
->tx_quick_cons_trip_int
= (u16
) coal
->tx_max_coalesced_frames_irq
;
5026 if (bp
->tx_quick_cons_trip_int
> 0xff) bp
->tx_quick_cons_trip_int
=
5029 bp
->stats_ticks
= coal
->stats_block_coalesce_usecs
;
5030 if (bp
->stats_ticks
> 0xffff00) bp
->stats_ticks
= 0xffff00;
5031 bp
->stats_ticks
&= 0xffff00;
5033 if (netif_running(bp
->dev
)) {
5034 bnx2_netif_stop(bp
);
5036 bnx2_netif_start(bp
);
5043 bnx2_get_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
5045 struct bnx2
*bp
= netdev_priv(dev
);
5047 ering
->rx_max_pending
= MAX_TOTAL_RX_DESC_CNT
;
5048 ering
->rx_mini_max_pending
= 0;
5049 ering
->rx_jumbo_max_pending
= 0;
5051 ering
->rx_pending
= bp
->rx_ring_size
;
5052 ering
->rx_mini_pending
= 0;
5053 ering
->rx_jumbo_pending
= 0;
5055 ering
->tx_max_pending
= MAX_TX_DESC_CNT
;
5056 ering
->tx_pending
= bp
->tx_ring_size
;
5060 bnx2_set_ringparam(struct net_device
*dev
, struct ethtool_ringparam
*ering
)
5062 struct bnx2
*bp
= netdev_priv(dev
);
5064 if ((ering
->rx_pending
> MAX_TOTAL_RX_DESC_CNT
) ||
5065 (ering
->tx_pending
> MAX_TX_DESC_CNT
) ||
5066 (ering
->tx_pending
<= MAX_SKB_FRAGS
)) {
5070 if (netif_running(bp
->dev
)) {
5071 bnx2_netif_stop(bp
);
5072 bnx2_reset_chip(bp
, BNX2_DRV_MSG_CODE_RESET
);
5077 bnx2_set_rx_ring_size(bp
, ering
->rx_pending
);
5078 bp
->tx_ring_size
= ering
->tx_pending
;
5080 if (netif_running(bp
->dev
)) {
5083 rc
= bnx2_alloc_mem(bp
);
5087 bnx2_netif_start(bp
);
5094 bnx2_get_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
5096 struct bnx2
*bp
= netdev_priv(dev
);
5098 epause
->autoneg
= ((bp
->autoneg
& AUTONEG_FLOW_CTRL
) != 0);
5099 epause
->rx_pause
= ((bp
->flow_ctrl
& FLOW_CTRL_RX
) != 0);
5100 epause
->tx_pause
= ((bp
->flow_ctrl
& FLOW_CTRL_TX
) != 0);
5104 bnx2_set_pauseparam(struct net_device
*dev
, struct ethtool_pauseparam
*epause
)
5106 struct bnx2
*bp
= netdev_priv(dev
);
5108 bp
->req_flow_ctrl
= 0;
5109 if (epause
->rx_pause
)
5110 bp
->req_flow_ctrl
|= FLOW_CTRL_RX
;
5111 if (epause
->tx_pause
)
5112 bp
->req_flow_ctrl
|= FLOW_CTRL_TX
;
5114 if (epause
->autoneg
) {
5115 bp
->autoneg
|= AUTONEG_FLOW_CTRL
;
5118 bp
->autoneg
&= ~AUTONEG_FLOW_CTRL
;
5121 spin_lock_bh(&bp
->phy_lock
);
5125 spin_unlock_bh(&bp
->phy_lock
);
5131 bnx2_get_rx_csum(struct net_device
*dev
)
5133 struct bnx2
*bp
= netdev_priv(dev
);
5139 bnx2_set_rx_csum(struct net_device
*dev
, u32 data
)
5141 struct bnx2
*bp
= netdev_priv(dev
);
5148 bnx2_set_tso(struct net_device
*dev
, u32 data
)
5151 dev
->features
|= NETIF_F_TSO
| NETIF_F_TSO_ECN
;
5153 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO_ECN
);
5157 #define BNX2_NUM_STATS 46
5160 char string
[ETH_GSTRING_LEN
];
5161 } bnx2_stats_str_arr
[BNX2_NUM_STATS
] = {
5163 { "rx_error_bytes" },
5165 { "tx_error_bytes" },
5166 { "rx_ucast_packets" },
5167 { "rx_mcast_packets" },
5168 { "rx_bcast_packets" },
5169 { "tx_ucast_packets" },
5170 { "tx_mcast_packets" },
5171 { "tx_bcast_packets" },
5172 { "tx_mac_errors" },
5173 { "tx_carrier_errors" },
5174 { "rx_crc_errors" },
5175 { "rx_align_errors" },
5176 { "tx_single_collisions" },
5177 { "tx_multi_collisions" },
5179 { "tx_excess_collisions" },
5180 { "tx_late_collisions" },
5181 { "tx_total_collisions" },
5184 { "rx_undersize_packets" },
5185 { "rx_oversize_packets" },
5186 { "rx_64_byte_packets" },
5187 { "rx_65_to_127_byte_packets" },
5188 { "rx_128_to_255_byte_packets" },
5189 { "rx_256_to_511_byte_packets" },
5190 { "rx_512_to_1023_byte_packets" },
5191 { "rx_1024_to_1522_byte_packets" },
5192 { "rx_1523_to_9022_byte_packets" },
5193 { "tx_64_byte_packets" },
5194 { "tx_65_to_127_byte_packets" },
5195 { "tx_128_to_255_byte_packets" },
5196 { "tx_256_to_511_byte_packets" },
5197 { "tx_512_to_1023_byte_packets" },
5198 { "tx_1024_to_1522_byte_packets" },
5199 { "tx_1523_to_9022_byte_packets" },
5200 { "rx_xon_frames" },
5201 { "rx_xoff_frames" },
5202 { "tx_xon_frames" },
5203 { "tx_xoff_frames" },
5204 { "rx_mac_ctrl_frames" },
5205 { "rx_filtered_packets" },
5207 { "rx_fw_discards" },
5210 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5212 static const unsigned long bnx2_stats_offset_arr
[BNX2_NUM_STATS
] = {
5213 STATS_OFFSET32(stat_IfHCInOctets_hi
),
5214 STATS_OFFSET32(stat_IfHCInBadOctets_hi
),
5215 STATS_OFFSET32(stat_IfHCOutOctets_hi
),
5216 STATS_OFFSET32(stat_IfHCOutBadOctets_hi
),
5217 STATS_OFFSET32(stat_IfHCInUcastPkts_hi
),
5218 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi
),
5219 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi
),
5220 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi
),
5221 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi
),
5222 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi
),
5223 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors
),
5224 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors
),
5225 STATS_OFFSET32(stat_Dot3StatsFCSErrors
),
5226 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors
),
5227 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames
),
5228 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames
),
5229 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions
),
5230 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions
),
5231 STATS_OFFSET32(stat_Dot3StatsLateCollisions
),
5232 STATS_OFFSET32(stat_EtherStatsCollisions
),
5233 STATS_OFFSET32(stat_EtherStatsFragments
),
5234 STATS_OFFSET32(stat_EtherStatsJabbers
),
5235 STATS_OFFSET32(stat_EtherStatsUndersizePkts
),
5236 STATS_OFFSET32(stat_EtherStatsOverrsizePkts
),
5237 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets
),
5238 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets
),
5239 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets
),
5240 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets
),
5241 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets
),
5242 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets
),
5243 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets
),
5244 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets
),
5245 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets
),
5246 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets
),
5247 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets
),
5248 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets
),
5249 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets
),
5250 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets
),
5251 STATS_OFFSET32(stat_XonPauseFramesReceived
),
5252 STATS_OFFSET32(stat_XoffPauseFramesReceived
),
5253 STATS_OFFSET32(stat_OutXonSent
),
5254 STATS_OFFSET32(stat_OutXoffSent
),
5255 STATS_OFFSET32(stat_MacControlFramesReceived
),
5256 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards
),
5257 STATS_OFFSET32(stat_IfInMBUFDiscards
),
5258 STATS_OFFSET32(stat_FwRxDrop
),
5261 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5262 * skipped because of errata.
5264 static u8 bnx2_5706_stats_len_arr
[BNX2_NUM_STATS
] = {
5265 8,0,8,8,8,8,8,8,8,8,
5266 4,0,4,4,4,4,4,4,4,4,
5267 4,4,4,4,4,4,4,4,4,4,
5268 4,4,4,4,4,4,4,4,4,4,
5272 static u8 bnx2_5708_stats_len_arr
[BNX2_NUM_STATS
] = {
5273 8,0,8,8,8,8,8,8,8,8,
5274 4,4,4,4,4,4,4,4,4,4,
5275 4,4,4,4,4,4,4,4,4,4,
5276 4,4,4,4,4,4,4,4,4,4,
5280 #define BNX2_NUM_TESTS 6
5283 char string
[ETH_GSTRING_LEN
];
5284 } bnx2_tests_str_arr
[BNX2_NUM_TESTS
] = {
5285 { "register_test (offline)" },
5286 { "memory_test (offline)" },
5287 { "loopback_test (offline)" },
5288 { "nvram_test (online)" },
5289 { "interrupt_test (online)" },
5290 { "link_test (online)" },
5294 bnx2_self_test_count(struct net_device
*dev
)
5296 return BNX2_NUM_TESTS
;
5300 bnx2_self_test(struct net_device
*dev
, struct ethtool_test
*etest
, u64
*buf
)
5302 struct bnx2
*bp
= netdev_priv(dev
);
5304 memset(buf
, 0, sizeof(u64
) * BNX2_NUM_TESTS
);
5305 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
5308 bnx2_netif_stop(bp
);
5309 bnx2_reset_chip(bp
, BNX2_DRV_MSG_CODE_DIAG
);
5312 if (bnx2_test_registers(bp
) != 0) {
5314 etest
->flags
|= ETH_TEST_FL_FAILED
;
5316 if (bnx2_test_memory(bp
) != 0) {
5318 etest
->flags
|= ETH_TEST_FL_FAILED
;
5320 if ((buf
[2] = bnx2_test_loopback(bp
)) != 0)
5321 etest
->flags
|= ETH_TEST_FL_FAILED
;
5323 if (!netif_running(bp
->dev
)) {
5324 bnx2_reset_chip(bp
, BNX2_DRV_MSG_CODE_RESET
);
5328 bnx2_netif_start(bp
);
5331 /* wait for link up */
5332 for (i
= 0; i
< 7; i
++) {
5335 msleep_interruptible(1000);
5339 if (bnx2_test_nvram(bp
) != 0) {
5341 etest
->flags
|= ETH_TEST_FL_FAILED
;
5343 if (bnx2_test_intr(bp
) != 0) {
5345 etest
->flags
|= ETH_TEST_FL_FAILED
;
5348 if (bnx2_test_link(bp
) != 0) {
5350 etest
->flags
|= ETH_TEST_FL_FAILED
;
5356 bnx2_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
5358 switch (stringset
) {
5360 memcpy(buf
, bnx2_stats_str_arr
,
5361 sizeof(bnx2_stats_str_arr
));
5364 memcpy(buf
, bnx2_tests_str_arr
,
5365 sizeof(bnx2_tests_str_arr
));
5371 bnx2_get_stats_count(struct net_device
*dev
)
5373 return BNX2_NUM_STATS
;
5377 bnx2_get_ethtool_stats(struct net_device
*dev
,
5378 struct ethtool_stats
*stats
, u64
*buf
)
5380 struct bnx2
*bp
= netdev_priv(dev
);
5382 u32
*hw_stats
= (u32
*) bp
->stats_blk
;
5383 u8
*stats_len_arr
= NULL
;
5385 if (hw_stats
== NULL
) {
5386 memset(buf
, 0, sizeof(u64
) * BNX2_NUM_STATS
);
5390 if ((CHIP_ID(bp
) == CHIP_ID_5706_A0
) ||
5391 (CHIP_ID(bp
) == CHIP_ID_5706_A1
) ||
5392 (CHIP_ID(bp
) == CHIP_ID_5706_A2
) ||
5393 (CHIP_ID(bp
) == CHIP_ID_5708_A0
))
5394 stats_len_arr
= bnx2_5706_stats_len_arr
;
5396 stats_len_arr
= bnx2_5708_stats_len_arr
;
5398 for (i
= 0; i
< BNX2_NUM_STATS
; i
++) {
5399 if (stats_len_arr
[i
] == 0) {
5400 /* skip this counter */
5404 if (stats_len_arr
[i
] == 4) {
5405 /* 4-byte counter */
5407 *(hw_stats
+ bnx2_stats_offset_arr
[i
]);
5410 /* 8-byte counter */
5411 buf
[i
] = (((u64
) *(hw_stats
+
5412 bnx2_stats_offset_arr
[i
])) << 32) +
5413 *(hw_stats
+ bnx2_stats_offset_arr
[i
] + 1);
5418 bnx2_phys_id(struct net_device
*dev
, u32 data
)
5420 struct bnx2
*bp
= netdev_priv(dev
);
5427 save
= REG_RD(bp
, BNX2_MISC_CFG
);
5428 REG_WR(bp
, BNX2_MISC_CFG
, BNX2_MISC_CFG_LEDMODE_MAC
);
5430 for (i
= 0; i
< (data
* 2); i
++) {
5432 REG_WR(bp
, BNX2_EMAC_LED
, BNX2_EMAC_LED_OVERRIDE
);
5435 REG_WR(bp
, BNX2_EMAC_LED
, BNX2_EMAC_LED_OVERRIDE
|
5436 BNX2_EMAC_LED_1000MB_OVERRIDE
|
5437 BNX2_EMAC_LED_100MB_OVERRIDE
|
5438 BNX2_EMAC_LED_10MB_OVERRIDE
|
5439 BNX2_EMAC_LED_TRAFFIC_OVERRIDE
|
5440 BNX2_EMAC_LED_TRAFFIC
);
5442 msleep_interruptible(500);
5443 if (signal_pending(current
))
5446 REG_WR(bp
, BNX2_EMAC_LED
, 0);
5447 REG_WR(bp
, BNX2_MISC_CFG
, save
);
5451 static const struct ethtool_ops bnx2_ethtool_ops
= {
5452 .get_settings
= bnx2_get_settings
,
5453 .set_settings
= bnx2_set_settings
,
5454 .get_drvinfo
= bnx2_get_drvinfo
,
5455 .get_regs_len
= bnx2_get_regs_len
,
5456 .get_regs
= bnx2_get_regs
,
5457 .get_wol
= bnx2_get_wol
,
5458 .set_wol
= bnx2_set_wol
,
5459 .nway_reset
= bnx2_nway_reset
,
5460 .get_link
= ethtool_op_get_link
,
5461 .get_eeprom_len
= bnx2_get_eeprom_len
,
5462 .get_eeprom
= bnx2_get_eeprom
,
5463 .set_eeprom
= bnx2_set_eeprom
,
5464 .get_coalesce
= bnx2_get_coalesce
,
5465 .set_coalesce
= bnx2_set_coalesce
,
5466 .get_ringparam
= bnx2_get_ringparam
,
5467 .set_ringparam
= bnx2_set_ringparam
,
5468 .get_pauseparam
= bnx2_get_pauseparam
,
5469 .set_pauseparam
= bnx2_set_pauseparam
,
5470 .get_rx_csum
= bnx2_get_rx_csum
,
5471 .set_rx_csum
= bnx2_set_rx_csum
,
5472 .get_tx_csum
= ethtool_op_get_tx_csum
,
5473 .set_tx_csum
= ethtool_op_set_tx_csum
,
5474 .get_sg
= ethtool_op_get_sg
,
5475 .set_sg
= ethtool_op_set_sg
,
5477 .get_tso
= ethtool_op_get_tso
,
5478 .set_tso
= bnx2_set_tso
,
5480 .self_test_count
= bnx2_self_test_count
,
5481 .self_test
= bnx2_self_test
,
5482 .get_strings
= bnx2_get_strings
,
5483 .phys_id
= bnx2_phys_id
,
5484 .get_stats_count
= bnx2_get_stats_count
,
5485 .get_ethtool_stats
= bnx2_get_ethtool_stats
,
5486 .get_perm_addr
= ethtool_op_get_perm_addr
,
5489 /* Called with rtnl_lock */
5491 bnx2_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
5493 struct mii_ioctl_data
*data
= if_mii(ifr
);
5494 struct bnx2
*bp
= netdev_priv(dev
);
5499 data
->phy_id
= bp
->phy_addr
;
5505 spin_lock_bh(&bp
->phy_lock
);
5506 err
= bnx2_read_phy(bp
, data
->reg_num
& 0x1f, &mii_regval
);
5507 spin_unlock_bh(&bp
->phy_lock
);
5509 data
->val_out
= mii_regval
;
5515 if (!capable(CAP_NET_ADMIN
))
5518 spin_lock_bh(&bp
->phy_lock
);
5519 err
= bnx2_write_phy(bp
, data
->reg_num
& 0x1f, data
->val_in
);
5520 spin_unlock_bh(&bp
->phy_lock
);
5531 /* Called with rtnl_lock */
5533 bnx2_change_mac_addr(struct net_device
*dev
, void *p
)
5535 struct sockaddr
*addr
= p
;
5536 struct bnx2
*bp
= netdev_priv(dev
);
5538 if (!is_valid_ether_addr(addr
->sa_data
))
5541 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
5542 if (netif_running(dev
))
5543 bnx2_set_mac_addr(bp
);
5548 /* Called with rtnl_lock */
5550 bnx2_change_mtu(struct net_device
*dev
, int new_mtu
)
5552 struct bnx2
*bp
= netdev_priv(dev
);
5554 if (((new_mtu
+ ETH_HLEN
) > MAX_ETHERNET_JUMBO_PACKET_SIZE
) ||
5555 ((new_mtu
+ ETH_HLEN
) < MIN_ETHERNET_PACKET_SIZE
))
5559 if (netif_running(dev
)) {
5560 bnx2_netif_stop(bp
);
5564 bnx2_netif_start(bp
);
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: invoke the interrupt handler directly with the device
 * IRQ masked, so netconsole/kgdboe can drain the rings without
 * interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5581 static int __devinit
5582 bnx2_init_board(struct pci_dev
*pdev
, struct net_device
*dev
)
5585 unsigned long mem_len
;
5589 SET_MODULE_OWNER(dev
);
5590 SET_NETDEV_DEV(dev
, &pdev
->dev
);
5591 bp
= netdev_priv(dev
);
5596 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5597 rc
= pci_enable_device(pdev
);
5599 dev_err(&pdev
->dev
, "Cannot enable PCI device, aborting.");
5603 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
5605 "Cannot find PCI device base address, aborting.\n");
5607 goto err_out_disable
;
5610 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
5612 dev_err(&pdev
->dev
, "Cannot obtain PCI resources, aborting.\n");
5613 goto err_out_disable
;
5616 pci_set_master(pdev
);
5618 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
5619 if (bp
->pm_cap
== 0) {
5621 "Cannot find power management capability, aborting.\n");
5623 goto err_out_release
;
5626 bp
->pcix_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PCIX
);
5627 if (bp
->pcix_cap
== 0) {
5628 dev_err(&pdev
->dev
, "Cannot find PCIX capability, aborting.\n");
5630 goto err_out_release
;
5633 if (pci_set_dma_mask(pdev
, DMA_64BIT_MASK
) == 0) {
5634 bp
->flags
|= USING_DAC_FLAG
;
5635 if (pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
) != 0) {
5637 "pci_set_consistent_dma_mask failed, aborting.\n");
5639 goto err_out_release
;
5642 else if (pci_set_dma_mask(pdev
, DMA_32BIT_MASK
) != 0) {
5643 dev_err(&pdev
->dev
, "System does not support DMA, aborting.\n");
5645 goto err_out_release
;
5651 spin_lock_init(&bp
->phy_lock
);
5652 INIT_WORK(&bp
->reset_task
, bnx2_reset_task
, bp
);
5654 dev
->base_addr
= dev
->mem_start
= pci_resource_start(pdev
, 0);
5655 mem_len
= MB_GET_CID_ADDR(17);
5656 dev
->mem_end
= dev
->mem_start
+ mem_len
;
5657 dev
->irq
= pdev
->irq
;
5659 bp
->regview
= ioremap_nocache(dev
->base_addr
, mem_len
);
5662 dev_err(&pdev
->dev
, "Cannot map register space, aborting.\n");
5664 goto err_out_release
;
5667 /* Configure byte swap and enable write to the reg_window registers.
5668 * Rely on CPU to do target byte swapping on big endian systems
5669 * The chip's target access swapping will not swap all accesses
5671 pci_write_config_dword(bp
->pdev
, BNX2_PCICFG_MISC_CONFIG
,
5672 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA
|
5673 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP
);
5675 bnx2_set_power_state(bp
, PCI_D0
);
5677 bp
->chip_id
= REG_RD(bp
, BNX2_MISC_ID
);
5679 /* Get bus information. */
5680 reg
= REG_RD(bp
, BNX2_PCICFG_MISC_STATUS
);
5681 if (reg
& BNX2_PCICFG_MISC_STATUS_PCIX_DET
) {
5684 bp
->flags
|= PCIX_FLAG
;
5686 clkreg
= REG_RD(bp
, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS
);
5688 clkreg
&= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET
;
5690 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ
:
5691 bp
->bus_speed_mhz
= 133;
5694 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ
:
5695 bp
->bus_speed_mhz
= 100;
5698 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ
:
5699 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ
:
5700 bp
->bus_speed_mhz
= 66;
5703 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ
:
5704 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ
:
5705 bp
->bus_speed_mhz
= 50;
5708 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW
:
5709 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ
:
5710 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ
:
5711 bp
->bus_speed_mhz
= 33;
5716 if (reg
& BNX2_PCICFG_MISC_STATUS_M66EN
)
5717 bp
->bus_speed_mhz
= 66;
5719 bp
->bus_speed_mhz
= 33;
5722 if (reg
& BNX2_PCICFG_MISC_STATUS_32BIT_DET
)
5723 bp
->flags
|= PCI_32BIT_FLAG
;
5725 /* 5706A0 may falsely detect SERR and PERR. */
5726 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
5727 reg
= REG_RD(bp
, PCI_COMMAND
);
5728 reg
&= ~(PCI_COMMAND_SERR
| PCI_COMMAND_PARITY
);
5729 REG_WR(bp
, PCI_COMMAND
, reg
);
5731 else if ((CHIP_ID(bp
) == CHIP_ID_5706_A1
) &&
5732 !(bp
->flags
& PCIX_FLAG
)) {
5735 "5706 A1 can only be used in a PCIX bus, aborting.\n");
5739 bnx2_init_nvram(bp
);
5741 reg
= REG_RD_IND(bp
, BNX2_SHM_HDR_SIGNATURE
);
5743 if ((reg
& BNX2_SHM_HDR_SIGNATURE_SIG_MASK
) ==
5744 BNX2_SHM_HDR_SIGNATURE_SIG
)
5745 bp
->shmem_base
= REG_RD_IND(bp
, BNX2_SHM_HDR_ADDR_0
);
5747 bp
->shmem_base
= HOST_VIEW_SHMEM_BASE
;
5749 /* Get the permanent MAC address. First we need to make sure the
5750 * firmware is actually running.
5752 reg
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_DEV_INFO_SIGNATURE
);
5754 if ((reg
& BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK
) !=
5755 BNX2_DEV_INFO_SIGNATURE_MAGIC
) {
5756 dev_err(&pdev
->dev
, "Firmware not running, aborting.\n");
5761 bp
->fw_ver
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_DEV_INFO_BC_REV
);
5763 reg
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_HW_CFG_MAC_UPPER
);
5764 bp
->mac_addr
[0] = (u8
) (reg
>> 8);
5765 bp
->mac_addr
[1] = (u8
) reg
;
5767 reg
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_HW_CFG_MAC_LOWER
);
5768 bp
->mac_addr
[2] = (u8
) (reg
>> 24);
5769 bp
->mac_addr
[3] = (u8
) (reg
>> 16);
5770 bp
->mac_addr
[4] = (u8
) (reg
>> 8);
5771 bp
->mac_addr
[5] = (u8
) reg
;
5773 bp
->tx_ring_size
= MAX_TX_DESC_CNT
;
5774 bnx2_set_rx_ring_size(bp
, 255);
5778 bp
->rx_offset
= sizeof(struct l2_fhdr
) + 2;
5780 bp
->tx_quick_cons_trip_int
= 20;
5781 bp
->tx_quick_cons_trip
= 20;
5782 bp
->tx_ticks_int
= 80;
5785 bp
->rx_quick_cons_trip_int
= 6;
5786 bp
->rx_quick_cons_trip
= 6;
5787 bp
->rx_ticks_int
= 18;
5790 bp
->stats_ticks
= 1000000 & 0xffff00;
5792 bp
->timer_interval
= HZ
;
5793 bp
->current_interval
= HZ
;
5797 /* Disable WOL support if we are running on a SERDES chip. */
5798 if (CHIP_BOND_ID(bp
) & CHIP_BOND_ID_SERDES_BIT
) {
5799 bp
->phy_flags
|= PHY_SERDES_FLAG
;
5800 bp
->flags
|= NO_WOL_FLAG
;
5801 if (CHIP_NUM(bp
) == CHIP_NUM_5708
) {
5803 reg
= REG_RD_IND(bp
, bp
->shmem_base
+
5804 BNX2_SHARED_HW_CFG_CONFIG
);
5805 if (reg
& BNX2_SHARED_HW_CFG_PHY_2_5G
)
5806 bp
->phy_flags
|= PHY_2_5G_CAPABLE_FLAG
;
5810 if ((CHIP_ID(bp
) == CHIP_ID_5708_A0
) ||
5811 (CHIP_ID(bp
) == CHIP_ID_5708_B0
) ||
5812 (CHIP_ID(bp
) == CHIP_ID_5708_B1
))
5813 bp
->flags
|= NO_WOL_FLAG
;
5815 if (CHIP_ID(bp
) == CHIP_ID_5706_A0
) {
5816 bp
->tx_quick_cons_trip_int
=
5817 bp
->tx_quick_cons_trip
;
5818 bp
->tx_ticks_int
= bp
->tx_ticks
;
5819 bp
->rx_quick_cons_trip_int
=
5820 bp
->rx_quick_cons_trip
;
5821 bp
->rx_ticks_int
= bp
->rx_ticks
;
5822 bp
->comp_prod_trip_int
= bp
->comp_prod_trip
;
5823 bp
->com_ticks_int
= bp
->com_ticks
;
5824 bp
->cmd_ticks_int
= bp
->cmd_ticks
;
5827 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5829 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5830 * with byte enables disabled on the unused 32-bit word. This is legal
5831 * but causes problems on the AMD 8132 which will eventually stop
5832 * responding after a while.
5834 * AMD believes this incompatibility is unique to the 5706, and
5835 * prefers to locally disable MSI rather than globally disabling it
5836 * using pci_msi_quirk.
5838 if (CHIP_NUM(bp
) == CHIP_NUM_5706
&& disable_msi
== 0) {
5839 struct pci_dev
*amd_8132
= NULL
;
5841 while ((amd_8132
= pci_get_device(PCI_VENDOR_ID_AMD
,
5842 PCI_DEVICE_ID_AMD_8132_BRIDGE
,
5846 pci_read_config_byte(amd_8132
, PCI_REVISION_ID
, &rev
);
5847 if (rev
>= 0x10 && rev
<= 0x13) {
5849 pci_dev_put(amd_8132
);
5855 bp
->autoneg
= AUTONEG_SPEED
| AUTONEG_FLOW_CTRL
;
5856 bp
->req_line_speed
= 0;
5857 if (bp
->phy_flags
& PHY_SERDES_FLAG
) {
5858 bp
->advertising
= ETHTOOL_ALL_FIBRE_SPEED
| ADVERTISED_Autoneg
;
5860 reg
= REG_RD_IND(bp
, bp
->shmem_base
+ BNX2_PORT_HW_CFG_CONFIG
);
5861 reg
&= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK
;
5862 if (reg
== BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G
) {
5864 bp
->req_line_speed
= bp
->line_speed
= SPEED_1000
;
5865 bp
->req_duplex
= DUPLEX_FULL
;
5869 bp
->advertising
= ETHTOOL_ALL_COPPER_SPEED
| ADVERTISED_Autoneg
;
5872 bp
->req_flow_ctrl
= FLOW_CTRL_RX
| FLOW_CTRL_TX
;
5874 init_timer(&bp
->timer
);
5875 bp
->timer
.expires
= RUN_AT(bp
->timer_interval
);
5876 bp
->timer
.data
= (unsigned long) bp
;
5877 bp
->timer
.function
= bnx2_timer
;
5883 iounmap(bp
->regview
);
5888 pci_release_regions(pdev
);
5891 pci_disable_device(pdev
);
5892 pci_set_drvdata(pdev
, NULL
);
5898 static int __devinit
5899 bnx2_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
5901 static int version_printed
= 0;
5902 struct net_device
*dev
= NULL
;
5906 if (version_printed
++ == 0)
5907 printk(KERN_INFO
"%s", version
);
5909 /* dev zeroed in init_etherdev */
5910 dev
= alloc_etherdev(sizeof(*bp
));
5915 rc
= bnx2_init_board(pdev
, dev
);
5921 dev
->open
= bnx2_open
;
5922 dev
->hard_start_xmit
= bnx2_start_xmit
;
5923 dev
->stop
= bnx2_close
;
5924 dev
->get_stats
= bnx2_get_stats
;
5925 dev
->set_multicast_list
= bnx2_set_rx_mode
;
5926 dev
->do_ioctl
= bnx2_ioctl
;
5927 dev
->set_mac_address
= bnx2_change_mac_addr
;
5928 dev
->change_mtu
= bnx2_change_mtu
;
5929 dev
->tx_timeout
= bnx2_tx_timeout
;
5930 dev
->watchdog_timeo
= TX_TIMEOUT
;
5932 dev
->vlan_rx_register
= bnx2_vlan_rx_register
;
5933 dev
->vlan_rx_kill_vid
= bnx2_vlan_rx_kill_vid
;
5935 dev
->poll
= bnx2_poll
;
5936 dev
->ethtool_ops
= &bnx2_ethtool_ops
;
5939 bp
= netdev_priv(dev
);
5941 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5942 dev
->poll_controller
= poll_bnx2
;
5945 if ((rc
= register_netdev(dev
))) {
5946 dev_err(&pdev
->dev
, "Cannot register net device\n");
5948 iounmap(bp
->regview
);
5949 pci_release_regions(pdev
);
5950 pci_disable_device(pdev
);
5951 pci_set_drvdata(pdev
, NULL
);
5956 pci_set_drvdata(pdev
, dev
);
5958 memcpy(dev
->dev_addr
, bp
->mac_addr
, 6);
5959 memcpy(dev
->perm_addr
, bp
->mac_addr
, 6);
5960 bp
->name
= board_info
[ent
->driver_data
].name
,
5961 printk(KERN_INFO
"%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5965 ((CHIP_ID(bp
) & 0xf000) >> 12) + 'A',
5966 ((CHIP_ID(bp
) & 0x0ff0) >> 4),
5967 ((bp
->flags
& PCIX_FLAG
) ? "-X" : ""),
5968 ((bp
->flags
& PCI_32BIT_FLAG
) ? "32-bit" : "64-bit"),
5973 printk("node addr ");
5974 for (i
= 0; i
< 6; i
++)
5975 printk("%2.2x", dev
->dev_addr
[i
]);
5978 dev
->features
|= NETIF_F_SG
;
5979 if (bp
->flags
& USING_DAC_FLAG
)
5980 dev
->features
|= NETIF_F_HIGHDMA
;
5981 dev
->features
|= NETIF_F_IP_CSUM
;
5983 dev
->features
|= NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
;
5986 dev
->features
|= NETIF_F_TSO
| NETIF_F_TSO_ECN
;
5989 netif_carrier_off(bp
->dev
);
5994 static void __devexit
5995 bnx2_remove_one(struct pci_dev
*pdev
)
5997 struct net_device
*dev
= pci_get_drvdata(pdev
);
5998 struct bnx2
*bp
= netdev_priv(dev
);
6000 flush_scheduled_work();
6002 unregister_netdev(dev
);
6005 iounmap(bp
->regview
);
6008 pci_release_regions(pdev
);
6009 pci_disable_device(pdev
);
6010 pci_set_drvdata(pdev
, NULL
);
6014 bnx2_suspend(struct pci_dev
*pdev
, pm_message_t state
)
6016 struct net_device
*dev
= pci_get_drvdata(pdev
);
6017 struct bnx2
*bp
= netdev_priv(dev
);
6020 if (!netif_running(dev
))
6023 flush_scheduled_work();
6024 bnx2_netif_stop(bp
);
6025 netif_device_detach(dev
);
6026 del_timer_sync(&bp
->timer
);
6027 if (bp
->flags
& NO_WOL_FLAG
)
6028 reset_code
= BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN
;
6030 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_WOL
;
6032 reset_code
= BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL
;
6033 bnx2_reset_chip(bp
, reset_code
);
6035 bnx2_set_power_state(bp
, pci_choose_state(pdev
, state
));
6040 bnx2_resume(struct pci_dev
*pdev
)
6042 struct net_device
*dev
= pci_get_drvdata(pdev
);
6043 struct bnx2
*bp
= netdev_priv(dev
);
6045 if (!netif_running(dev
))
6048 bnx2_set_power_state(bp
, PCI_D0
);
6049 netif_device_attach(dev
);
6051 bnx2_netif_start(bp
);
6055 static struct pci_driver bnx2_pci_driver
= {
6056 .name
= DRV_MODULE_NAME
,
6057 .id_table
= bnx2_pci_tbl
,
6058 .probe
= bnx2_init_one
,
6059 .remove
= __devexit_p(bnx2_remove_one
),
6060 .suspend
= bnx2_suspend
,
6061 .resume
= bnx2_resume
,
6064 static int __init
bnx2_init(void)
6066 return pci_register_driver(&bnx2_pci_driver
);
6069 static void __exit
bnx2_cleanup(void)
6071 pci_unregister_driver(&bnx2_pci_driver
);
/* Register the module entry/exit points with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);