/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.42.4"
#define DRV_MODULE_RELDATE	"2008/4/9"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {
	BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* locking is done by mcp */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(bp->port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, (bp->port)*MAX_DMAE_C_PER_PORT);

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}
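/*
 * Flow sketch for the DMAE path above (illustrative, values hypothetical):
 * a command is posted via bnx2x_post_dmae() and the engine DMAs
 * DMAE_COMP_VAL back into the slowpath write-back word when it is done:
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), src, 4 * sizeof(u32));
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst_addr, 4);
 *	// returns once *wb_comp == DMAE_COMP_VAL (or after the timeout)
 */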
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(bp->port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, (bp->port)*MAX_DMAE_C_PER_PORT);

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	int i, j, rc = 0;
	char last_idx;
	const char storm[] = {"XTCU"};
	const u32 intmem_base[] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};

	/* Go through all instances of all SEMIs */
	for (i = 0; i < 4; i++) {
		last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
				   intmem_base[i]);
		if (last_idx)
			BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
				  storm[i], last_idx);

		/* print the asserts */
		for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
			u32 row0, row1, row2, row3;

			row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
				      intmem_base[i]);
			row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
				      intmem_base[i]);
			row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
				      intmem_base[i]);
			row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
				      intmem_base[i]);

			if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
					  " 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storm[i], j, row3, row2, row1, row0);
				rc++;
			} else {
				break;
			}
		}
	}
	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
			  " *rx_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
			  " bd data(%x,%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
			  fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
			  fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[0], rx_bd[1], sw_bd->skb);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");

	bp->stats_state = STATS_STATE_DISABLE;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		/* Errata A0.158 workaround */
		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
/*
 * general service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
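/*
 * Encoding sketch (illustrative only, not from the original file):
 * acking fastpath SB 0 on the USTORM index while re-enabling the IGU
 * line would look like
 *
 *	bnx2x_ack_sb(bp, 0, USTORM_ID, sb_index, IGU_INT_ENABLE, 1);
 *
 * which lands as a single 32-bit write combining the status block
 * index with the id/storm/update/op fields shifted into place.
 */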
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((rx_cons_sb != fp->rx_comp_cons) ||
	    (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
		return 1;

	return 0;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

/*	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}
/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = tx_buf->first_bd;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("bad nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {
		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	dev_kfree_skb(skb);
	tx_buf->skb = NULL;
	tx_buf->first_bd = 0;

	return bd_idx;
}
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	u16 used;
	u32 prod;
	u32 cons;

	/* Tell compiler that prod and cons can change */
	barrier();
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
		(cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

	if (prod >= cons) {
		/* used = prod - cons - prod/size + cons/size */
		used -= NUM_TX_BD - NUM_TX_RINGS;
	}

	BUG_TRAP(used <= fp->bp->tx_ring_size);
	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

	return (fp->bp->tx_ring_size - used);
}
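/*
 * Worked example of the accounting above (numbers illustrative only):
 * assuming TX_DESC_CNT = 256 and NUM_TX_RINGS = 16 (NUM_TX_BD = 4096),
 * with prod = 300 and cons = 260:
 *
 *	used = 4096 - 16 + 300 - 260 + (260/256) - (300/256)
 *	     = 4080 + 40 + 1 - 1 = 4120;  used -= 4080  ->  used = 40
 *
 * i.e. the per-page "next BD" entries drop out and only BDs actually
 * consumed between cons and prod are counted.
 */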
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(NETIF_MSG_RX_STATUS,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
				  command, fp->state);
		}
		mb(); /* force bnx2x_wait_ramrod to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
		   cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
			  command, bp->state);
	}
	mb(); /* force bnx2x_wait_ramrod to see the change */
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		unsigned int len, pad;
		struct sw_rx_bd *rx_buf;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];

		DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
		   " comp_ring (%u) bd_ring (%u,%u)\n",
		   hw_comp_cons, sw_comp_cons,
		   comp_ring_cons, bd_prod, bd_cons);
		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %x\n",
		   cqe->fast_path_cqe.type,
		   cqe->fast_path_cqe.error_type_flags,
		   cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

		/* is this a slowpath msg? */
		if (unlikely(cqe->fast_path_cqe.type)) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;

			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe->fast_path_cqe.error_type_flags &
							ETH_RX_ERROR_FALGS)) {
			/* do we sometimes forward error packets anyway? */
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags(%u) Rx packet(%u)\n",
				   cqe->fast_path_cqe.error_type_flags,
				   sw_comp_cons);
				/* TBD make sure MC counts this as a drop */
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					/* TBD count this as a drop? */
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* TBD do we pass bad csum packets in promisc */
		}

		if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
		     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
		    && (bp->vlgrp != NULL))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);

next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
		rx_pkt++;

		if ((rx_pkt == budget))
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = fp->index;

	DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
	bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);

	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}

	DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is shared and is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	if (status & 0x2) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~0x2;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status & 0x1)) {

		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
	   status);

	return IRQ_HANDLED;
}

/* end of fast path */
/*
 * General service functions
 */

static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 cnt;
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}
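/*
 * Illustrative pairing (not from the original file): each resource is a
 * single bit in DRIVER_CONTROL_1, so callers bracket a shared-register
 * critical section like
 *
 *	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... touch the shared register ...
 *	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * as bnx2x_set_gpio()/bnx2x_set_spio() below do.
 */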
/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->advertising &= ~(ADVERTISED_Asym_Pause |
				     ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->advertising |= (ADVERTISED_Asym_Pause |
				    ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->advertising &= ~(ADVERTISED_Asym_Pause |
				     ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	u8 rc;

	/* Initialize link parameters structure variables */
	bp->link_params.mtu = bp->dev->mtu;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	if (bp->link_vars.link_up)
		bnx2x_link_report(bp);

	bnx2x_calc_fc_adv(bp);

	return rc;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	bnx2x_calc_fc_adv(bp);
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	return rc;
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	/* indicate link status */
	bnx2x_link_report(bp);
}
/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int port = bp->port;

	DP(NETIF_MSG_TIMER,
	   "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
	       bp->spq_prod_idx);

	spin_unlock(&bp->spq_lock);
	return 0;
}
/* acquire split MCP access lock register */
static int bnx2x_lock_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire nvram interface\n");
		rc = -EBUSY;
	}

	return rc;
}

/* Release split MCP access lock register */
static void bnx2x_unlock_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */

	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = bp->port;
	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;

	if (~bp->aeu_mask & (asserted & 0xff))
		BNX2X_ERR("IGU ERROR\n");
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   bp->aeu_mask, asserted);
	bp->aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);

	REG_WR(bp, aeu_addr, bp->aeu_mask);

	bp->attn_state |= asserted;

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (asserted & ATTN_GENERAL_ATTN_1) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_2) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_3) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_4) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_5) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
		}
		if (asserted & ATTN_GENERAL_ATTN_6) {
			DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
	   asserted, BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = bp->port;
	int reg_offset;
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
						external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}
}
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_mc_assert(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {

		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
		BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
	}
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = bp->port;
	int index;
	u32 reg_addr;
	u32 val;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_lock_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
			   (unsigned long long)group_mask.sig[0]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_INTERRUT_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_INTERRUT_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_INTERRUT_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block attention"
					  " set0 0x%x set1 0x%x"
					  " set2 0x%x\n",
					  (attn.sig[0] & group_mask.sig[0] &
					   HW_INTERRUT_ASSERT_SET_0),
					  (attn.sig[1] & group_mask.sig[1] &
					   HW_INTERRUT_ASSERT_SET_1),
					  (attn.sig[2] & group_mask.sig[2] &
					   HW_INTERRUT_ASSERT_SET_2));

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_unlock_alr(bp);

	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;

	val = ~deasserted;
/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   val, BAR_IGU_INTMEM + reg_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);

	if (bp->aeu_mask & (deasserted & 0xff))
		BNX2X_ERR("IGU BUG\n");
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU BUG\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
	bp->aeu_mask |= (deasserted & 0xff);

	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
	REG_WR(bp, reg_addr, bp->aeu_mask);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("bad attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
	if (status == 0)
		BNX2X_ERR("spurious slowpath interrupt!\n");

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stat_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */
/****************************************************************************
* Macros
****************************************************************************/

#define UPDATE_STAT(s, t) \
	do { \
		estats->t += new->s - old->s; \
		old->s = new->s; \
	} while (0)

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { /* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { /* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { /* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { /* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { /* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
	do { \
		DIFF_64(diff.hi, new->s_hi, old->s_hi, \
			diff.lo, new->s_lo, old->s_lo); \
		old->s_hi = new->s_hi; \
		old->s_lo = new->s_lo; \
		ADD_64(estats->t_hi, diff.hi, \
		       estats->t_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
	do { \
		ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
	} while (0)
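/*
 * Carry/borrow sketch for the helpers above (values illustrative only):
 * with s_hi:s_lo = 0x0:0xfffffff0 and a_hi:a_lo = 0x0:0x20, ADD_64
 * leaves s_lo = 0x10 (wrapped); s_lo < a_lo supplies the carry, so
 * s_hi becomes 0x1 and the pair reads 0x1_00000010, as expected.
 */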
/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
2106 * Init service functions
2109 static void bnx2x_init_mac_stats(struct bnx2x
*bp
)
2111 struct dmae_command
*dmae
;
2112 int port
= bp
->port
;
2113 int loader_idx
= port
* 8;
2117 bp
->executer_idx
= 0;
2120 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
2121 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
2123 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
2125 DMAE_CMD_ENDIANITY_DW_SWAP
|
2127 (port
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));

	if (bp->link_vars.link_up)
		opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
				   sizeof(u32));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
				   sizeof(u32));
	dmae->dst_addr_lo = bp->fw_mb >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
		     sizeof(u32)) >> 2;
	if (bp->link_vars.link_up) {
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	} else {
		dmae->comp_addr_lo = 0;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 0;
	}

	if (!bp->link_vars.link_up) {
		/* no need to collect statistics in link down */
		return;
	}

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct bmac_stats,
						    rx_gr64));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct bmac_stats,
						    rx_gr64));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    rx_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    rx_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    tx_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
					   offsetof(struct emac_stats,
						    tx_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
	dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
				    offsetof(struct nig_stats, done));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
				    offsetof(struct nig_stats, done));
	dmae->comp_val = 0xffffffff;
}
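/* Rough picture of the chain assembled above: bp->executer_idx counts
 * dmae_command slots in the slowpath buffer.  Sub-commands complete to GRC
 * (DMAE_CMD_C_DST_GRC), so each one kicks the next through the
 * dmae_reg_go_c[] registers, while the final NIG copy completes to PCI and
 * writes comp_val = 0xffffffff into nig_stats.done, the marker that
 * bnx2x_update_storm_stats() checks before it trusts the snapshot.
 */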
static void bnx2x_init_stats(struct bnx2x *bp)
{
	int port = bp->port;

	bp->stats_state = STATS_STATE_DISABLE;
	bp->executer_idx = 0;

	bp->old_brb_discard = REG_RD(bp,
				     NIG_REG_STAT0_BRB_DISCARD + port*0x38);

	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
}
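/* U64_LO()/U64_HI() split a dma_addr_t into 32-bit halves, roughly:
 *
 *	u32 lo = U64_LO(map);	(map & 0xffffffff)
 *	u32 hi = U64_HI(map);	(map >> 32)
 *
 * which is why every 64-bit address above is published with two REG_WR
 * calls, the high half going to the same offset + 4.
 */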
static void bnx2x_stop_stats(struct bnx2x *bp)
{
	might_sleep();
	if (bp->stats_state != STATS_STATE_DISABLE) {
		int timeout = 10;

		bp->stats_state = STATS_STATE_STOP;
		DP(BNX2X_MSG_STATS, "stats_state - STOP\n");

		while (bp->stats_state != STATS_STATE_DISABLE) {
			if (!timeout) {
				BNX2X_ERR("timeout waiting for stats stop\n");
				break;
			}

			timeout--;
			msleep(100);
		}
	}
	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}
/*
 * Statistics service functions
 */

static void bnx2x_update_bmac_stats(struct bnx2x *bp)
{
	struct regpair diff;
	struct regpair sum;
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
	struct bmac_stats *old = &bp->old_bmac;
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);

	sum.hi = 0;
	sum.lo = 0;

	UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
		      tx_gtbyt.lo, total_bytes_transmitted_lo);

	UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
		      tx_gtmca.lo, total_multicast_packets_transmitted_lo);
	ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);

	UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
		      tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
	ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);

	UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
		      tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
	SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
	       estats->total_unicast_packets_transmitted_lo, sum.lo);

	UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
	UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
	UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
	UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
	UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
	UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
	UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
	UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
	UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);

	UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
	UPDATE_STAT(rx_grund.lo, runt_packets_received);
	UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
	UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
	UPDATE_STAT(rx_grxcf.lo, control_frames_received);
	/* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
	UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
	UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);

	UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
		      rx_grerb.lo, stat_IfHCInBadOctets_lo);
	UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
		      tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
	UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
	/* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
	estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
}
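/* A minimal sketch of what the UPDATE_STAT64/UPDATE_STAT macros above do
 * (see their definitions earlier in this file; the names here are only
 * illustrative):
 *
 *	diff = new->counter - old->counter;	wrap-safe delta
 *	old->counter = new->counter;
 *	estats->total += diff;
 *
 * i.e. the free-running BigMAC hardware counters are converted into
 * deltas and accumulated, so a MAC reset does not zero the totals.
 */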
static void bnx2x_update_emac_stats(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);

	UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
			   total_bytes_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
			   total_unicast_packets_transmitted_hi,
			   total_unicast_packets_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
			   total_multicast_packets_transmitted_hi,
			   total_multicast_packets_transmitted_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
			   total_broadcast_packets_transmitted_hi,
			   total_broadcast_packets_transmitted_lo);

	estats->pause_xon_frames_transmitted += new->tx_outxonsent;
	estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
	estats->single_collision_transmit_frames +=
				new->tx_dot3statssinglecollisionframes;
	estats->multiple_collision_transmit_frames +=
				new->tx_dot3statsmultiplecollisionframes;
	estats->late_collision_frames += new->tx_dot3statslatecollisions;
	estats->excessive_collision_frames +=
				new->tx_dot3statsexcessivecollisions;
	estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
	estats->frames_transmitted_65_127_bytes +=
				new->tx_etherstatspkts65octetsto127octets;
	estats->frames_transmitted_128_255_bytes +=
				new->tx_etherstatspkts128octetsto255octets;
	estats->frames_transmitted_256_511_bytes +=
				new->tx_etherstatspkts256octetsto511octets;
	estats->frames_transmitted_512_1023_bytes +=
				new->tx_etherstatspkts512octetsto1023octets;
	estats->frames_transmitted_1024_1522_bytes +=
				new->tx_etherstatspkts1024octetsto1522octet;
	estats->frames_transmitted_1523_9022_bytes +=
				new->tx_etherstatspktsover1522octets;

	estats->crc_receive_errors += new->rx_dot3statsfcserrors;
	estats->alignment_errors += new->rx_dot3statsalignmenterrors;
	estats->false_carrier_detections += new->rx_falsecarriererrors;
	estats->runt_packets_received += new->rx_etherstatsundersizepkts;
	estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
	estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
	estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
	estats->control_frames_received += new->rx_maccontrolframesreceived;
	estats->error_runt_packets_received += new->rx_etherstatsfragments;
	estats->error_jabber_packets_received += new->rx_etherstatsjabbers;

	UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
			   stat_IfHCInBadOctets_lo);
	UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
			   stat_IfHCOutBadOctets_lo);
	estats->stat_Dot3statsInternalMacTransmitErrors +=
				new->tx_dot3statsinternalmactransmiterrors;
	estats->stat_Dot3StatsCarrierSenseErrors +=
				new->rx_dot3statscarriersenseerrors;
	estats->stat_Dot3StatsDeferredTransmissions +=
				new->tx_dot3statsdeferredtransmissions;
	estats->stat_FlowControlDone += new->tx_flowcontroldone;
	estats->stat_XoffStateEntered += new->rx_xoffstateentered;
}
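/* The EMAC blocks export plain 32-bit counters, so UPDATE_EXTEND_STAT
 * widens them into the driver's hi/lo pairs with an explicit carry,
 * roughly:
 *
 *	s_lo += new->s;
 *	if (s_lo < new->s)	a 32-bit wrap happened
 *		s_hi++;
 */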
static int bnx2x_update_storm_stats(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_common_stats *tstats = &stats->tstorm_common;
	struct tstorm_per_client_stats *tclient =
						&tstats->client_statistics[0];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_common_stats *xstats = &stats->xstorm_common;
	struct nig_stats *nstats = bnx2x_sp(bp, nig);
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
	u32 diff;

	/* are DMAE stats valid? */
	if (nstats->done != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
		return -1;
	}

	/* are storm stats valid? */
	if (tstats->done.hi != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
		return -2;
	}
	if (xstats->done.hi != 0xffffffff) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
		return -3;
	}

	estats->total_bytes_received_hi =
	estats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	estats->total_bytes_received_lo =
	estats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);
	ADD_64(estats->total_bytes_received_hi,
	       le32_to_cpu(tclient->rcv_error_bytes.hi),
	       estats->total_bytes_received_lo,
	       le32_to_cpu(tclient->rcv_error_bytes.lo));

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received_hi,
					total_unicast_packets_received_lo);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received_hi,
					total_multicast_packets_received_lo);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received_hi,
					total_broadcast_packets_received_lo);

	estats->frames_received_64_bytes = MAC_STX_NA;
	estats->frames_received_65_127_bytes = MAC_STX_NA;
	estats->frames_received_128_255_bytes = MAC_STX_NA;
	estats->frames_received_256_511_bytes = MAC_STX_NA;
	estats->frames_received_512_1023_bytes = MAC_STX_NA;
	estats->frames_received_1024_1522_bytes = MAC_STX_NA;
	estats->frames_received_1523_9022_bytes = MAC_STX_NA;

	estats->x_total_sent_bytes_hi =
				le32_to_cpu(xstats->total_sent_bytes.hi);
	estats->x_total_sent_bytes_lo =
				le32_to_cpu(xstats->total_sent_bytes.lo);
	estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);

	estats->t_rcv_unicast_bytes_hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	estats->t_rcv_unicast_bytes_lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	estats->t_rcv_broadcast_bytes_hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	estats->t_rcv_broadcast_bytes_lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	estats->t_rcv_multicast_bytes_hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	estats->t_rcv_multicast_bytes_lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);

	estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	estats->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->jabber_packets_received = estats->packets_too_big_discard +
					  estats->stat_Dot3statsFramesTooLong;
	estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
	estats->mac_discard = le32_to_cpu(tclient->mac_discard);
	estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tstats->brb_truncate_discard);

	estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
	bp->old_brb_discard = nstats->brb_discard;

	estats->brb_packet = nstats->brb_packet;
	estats->brb_truncate = nstats->brb_truncate;
	estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
	estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
	estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
	estats->mng_discard = nstats->mng_discard;
	estats->mng_octet_inp = nstats->mng_octet_inp;
	estats->mng_octet_out = nstats->mng_octet_out;
	estats->mng_packet_inp = nstats->mng_packet_inp;
	estats->mng_packet_out = nstats->mng_packet_out;
	estats->pbf_octets = nstats->pbf_octets;
	estats->pbf_packet = nstats->pbf_packet;
	estats->safc_inp = nstats->safc_inp;

	xstats->done.hi = 0;
	tstats->done.hi = 0;
	nstats->done = 0;

	return 0;
}
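/* Validity handshake used above, in short: the DMAE chain and the storms
 * write 0xffffffff into their respective done markers only after a full
 * snapshot has landed in host memory.  The driver consumes the snapshot
 * and then clears the markers, so a partially written next snapshot is
 * never folded into estats.
 */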
static void bnx2x_update_net_stats(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions = estats->single_collision_transmit_frames +
			     estats->multiple_collision_transmit_frames +
			     estats->late_collision_frames +
			     estats->excessive_collision_frames;

	nstats->rx_length_errors = estats->runt_packets_received +
				   estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_discard +
				 estats->brb_truncate_discard;
	nstats->rx_crc_errors = estats->crc_receive_errors;
	nstats->rx_frame_errors = estats->alignment_errors;
	nstats->rx_fifo_errors = estats->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors = estats->late_collision_frames +
				    estats->excessive_collision_frames;
	nstats->tx_carrier_errors = estats->false_carrier_detections;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;

	estats->mac_stx_start = ++estats->mac_stx_end;
}
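/* bnx2x_hilo() (defined near the stats macros earlier in this file) folds
 * an estats hi/lo pair into the unsigned long that net_device_stats wants.
 * A rough sketch of the idea, assuming hi immediately precedes lo:
 *
 *	u32 lo = *(hiref + 1);
 *	u32 hi = *hiref;
 *	return sizeof(long) == 8 ? (((u64)hi << 32) | lo) : lo;
 *
 * so on 32-bit kernels only the low word survives.
 */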
static void bnx2x_update_stats(struct bnx2x *bp)
{
	int i;

	if (!bnx2x_update_storm_stats(bp)) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			bnx2x_update_bmac_stats(bp);

		} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
			bnx2x_update_emac_stats(bp);

		} else { /* unreached */
			BNX2X_ERR("no MAC active\n");
			return;
		}

		bnx2x_update_net_stats(bp);
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
		struct net_device_stats *nstats = &bp->dev->stats;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       *bp->fp->tx_cons_sb, nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
		       *bp->fp->rx_cons_sb, nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_discard);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       estats->checksum_discard,
		       estats->packets_too_big_discard,
		       estats->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard, estats->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
		return;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* loader */
	if (bp->executer_idx) {
		struct dmae_command *dmae = &bp->dmae;
		int port = bp->port;
		int loader_idx = port * 8;

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		dmae->len--;	/* !!! for A0/1 only */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		bnx2x_post_dmae(bp, dmae, loader_idx);
	}

	if (bp->stats_state != STATS_STATE_ENABLE) {
		bp->stats_state = STATS_STATE_DISABLE;
		return;
	}

	if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
		/* stats ramrod has it's own slot on the spe */
		bp->spq_left++;
		bp->stat_pending = 1;
	}
}
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!nomcp) {
		int port = bp->port;
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->stats_state == STATS_STATE_DISABLE)
		goto timer_restart;

	bnx2x_update_stats(bp);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
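/* Heartbeat arithmetic above, spelled out: the driver advances its pulse
 * (masked to DRV_PULSE_SEQ_MASK) once per timer tick and the MCP echoes
 * the last value it serviced.  With drv_pulse = 5 a healthy MCP reports
 * mcp_pulse = 5 (already answered) or 4 (answer pending, since
 * 5 == (4 + 1) & MCP_PULSE_SEQ_MASK); any other delta means a lost beat.
 */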
/* end of Statistics */
/*
 * nic init service functions
 */

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int id)
{
	int port = bp->port;
	u64 section;
	int index;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
	       U64_HI(section));

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
	       U64_HI(section));

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);

	bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
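/* Layout note (a rough sketch, not normative): host_status_block carries a
 * USTORM section (Rx completion indices) and a CSTORM section (Tx
 * completion indices) in one DMA-coherent buffer.  The code above hands
 * each storm the physical address of its own section and initially writes
 * 0x1 into every SB_HC_DISABLE slot, leaving per-index interrupt
 * generation masked until bnx2x_update_coalesce() re-enables the Rx/Tx CQ
 * indices it cares about.
 */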
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int id)
{
	int port = bp->port;
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = id;

	bp->def_att_idx = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < 3; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					  MISC_REG_AEU_MASK_ATTN_FUNC_0));

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = id;

	bp->def_u_idx = 0;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = id;

	bp->def_c_idx = 0;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = id;

	bp->def_t_idx = 0;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = id;

	bp->def_x_idx = 0;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
	       U64_HI(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
	       BNX2X_BTR);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);

	bp->stat_pending = 0;

	bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = bp->port;
	int i;

	for_each_queue(bp, i) {

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
						    HC_INDEX_U_ETH_RX_CQ_CONS),
			bp->rx_ticks_int/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, i,
						    HC_INDEX_U_ETH_RX_CQ_CONS),
			 bp->rx_ticks_int ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
						    HC_INDEX_C_ETH_TX_CQ_CONS),
			bp->tx_ticks_int/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
						    HC_INDEX_C_ETH_TX_CQ_CONS),
			 bp->tx_ticks_int ? 0 : 1);
	}
}
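/* Worked example for the conversion above (illustrative; assumes the
 * rx/tx_ticks_int values are kept in microseconds as set via ethtool -C):
 *
 *	bp->rx_ticks_int = 48  ->  timeout = 48/12 = 4 HC units, enabled
 *	bp->rx_ticks_int = 0   ->  timeout = 0, HC_DISABLE slot set to 1
 *
 * so a zero setting disables coalescing on that index entirely and the
 * chip interrupts on every completion.
 */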
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	u16 ring_prod;
	int i, j;
	int port = bp->port;

	bp->rx_buf_use_size = bp->dev->mtu;

	bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
	bp->rx_buf_size = bp->rx_buf_use_size + 64;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;

		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* rx completion queue */
		fp->rx_comp_cons = ring_prod = 0;

		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			BUG_TRAP(ring_prod > i);
		}

		fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning! this will generate an interrupt (to the TSTORM) */
		/* must only be done when chip is initialized */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
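/* Page chaining used above, in short: the trailing slots of each
 * BCM_PAGE_SIZE page are not data descriptors but the DMA address of the
 * next page (two eth_rx_bd slots for the Rx ring, one eth_rx_cqe slot for
 * the RCQ), and the (i % NUM_RX_RINGS) arithmetic wraps the last page back
 * to the first, so the hardware walks the pages as one circular ring.
 */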
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int port = bp->port;

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);

		context->ustorm_st_context.rx_bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.rx_bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		context->ustorm_st_context.status_block_id = i;
		context->ustorm_st_context.sb_index_number =
						HC_INDEX_U_ETH_RX_CQ_CONS;
		context->ustorm_st_context.rcq_base_address_hi =
						U64_HI(fp->rx_comp_mapping);
		context->ustorm_st_context.rcq_base_address_lo =
						U64_LO(fp->rx_comp_mapping);
		context->ustorm_st_context.flags =
				USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.mc_alignment_size = 64;
		context->ustorm_st_context.num_rss = bp->num_queues;

		context->cstorm_st_context.sb_index_number =
						HC_INDEX_C_ETH_TX_CQ_CONS;
		context->cstorm_st_context.status_block_id = i;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = bp->port;
	int i;

	if (!is_multi(bp))
		return;

	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}
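/* Resulting table, e.g. for num_queues == 4 (illustrative):
 *
 *	entry:	0 1 2 3 4 5 6 7 ...
 *	queue:	0 1 2 3 0 1 2 3 ...
 *
 * i.e. RSS hash results are spread round-robin across the active queues.
 */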
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int i, port = bp->port;
	struct tstorm_eth_client_config tstorm_client = {0};

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.statistics_counter_id = 0;
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif
	if (mode != BNX2X_RX_MODE_PROMISC)
		tstorm_client.drop_flags =
				TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, i),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
}
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int port = bp->port;
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int i;

	DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = 1;
		tstorm_mac_filter.mcast_drop_all = 1;
		tstorm_mac_filter.bcast_drop_all = 1;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = 1;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = 1;
		tstorm_mac_filter.bcast_accept_all = 1;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = 1;
		tstorm_mac_filter.mcast_accept_all = 1;
		tstorm_mac_filter.bcast_accept_all = 1;
		break;
	default:
		BNX2X_ERR("bad rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

		/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal(struct bnx2x *bp)
{
	int port = bp->port;
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
	       (*(u32 *)&tstorm_config));

	/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
	   (*(u32 *)&tstorm_config)); */

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	stats_flags.collect_eth = cpu_to_le32(1);

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
	   ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
}
static void bnx2x_nic_init(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->state = BNX2X_FP_STATE_CLOSED;
		DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
		   bp, fp->status_blk, i);

		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk,
			  bp->def_status_blk_mapping, 0x10);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp);
	bnx2x_init_stats(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */
/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " uncompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
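/* For reference, the header bytes tested above are the fixed gzip magic;
 * a conforming input starts:
 *
 *	zbuf[0] = 0x1f, zbuf[1] = 0x8b	gzip magic
 *	zbuf[2] = Z_DEFLATED (8)	compression method
 *	zbuf[3] = flags			FNAME (0x8): a NUL-terminated name
 *					follows the 10-byte fixed header
 *
 * The raw deflate stream after the header is handed to zlib with
 * -MAX_WBITS, i.e. zlib is told not to parse the gzip wrapper itself.
 */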
/* nic load/unload */

/*
 * general service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
#ifdef USE_DMAE
	u32 wb_write[3];
#endif

	/* Ethernet source and destination addresses */
#ifdef USE_DMAE
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
#else
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
	/* SOP */
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
#endif

	/* NON-IP protocol */
#ifdef USE_DMAE
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
#else
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
	/* EOP, eop_bvalid = 0 */
	REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
#endif
}
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {
#ifdef BNX2X_DMAE_RD
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
#else
		val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
		REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
#endif
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {
#ifdef BNX2X_DMAE_RD
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
#else
		val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
		REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
#endif
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
static int bnx2x_function_init(struct bnx2x *bp, int mode)
{
	int func = bp->port;
	int port = func ? PORT1 : PORT0;
	u32 val, i;
#ifdef USE_DMAE
	u32 wb_write[2];
#endif

	DP(BNX2X_MSG_MCP, "function is %d  mode is %x\n", func, mode);
	if ((func != 0) && (func != 1)) {
		BNX2X_ERR("BAD function number (%d)\n", func);
		return -ENODEV;
	}

	bnx2x_gunzip_init(bp);

	if (mode & 0x1) {	/* init common */
		DP(BNX2X_MSG_MCP, "starting common init  func %d  mode %x\n",
		   func, mode);
		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
		       0xffffffff);
		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
		       0xfffc);
		bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);

		REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
		msleep(30);
		REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

		bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
		bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);

		if (CHIP_REV(bp) == CHIP_REV_Ax) {
			/* enable HW interrupt from PXP on USDM
			   overflow bit 16 on INT_MASK_0 */
			REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
		}

#ifdef __BIG_ENDIAN
		REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
		REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
		REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
		REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
		REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
		REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*		REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
		REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
		REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
		REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
		REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

#ifndef BCM_ISCSI
		/* set NIC mode */
		REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

		REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
#ifdef BCM_ISCSI
		REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
		REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
		REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

		bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

		/* let the HW do it's magic ... */
		msleep(100);
		/* finish PXP init
		   (can be moved up if we want to use the DMAE) */
		val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
		if (val != 1) {
			BNX2X_ERR("PXP2 CFG failed\n");
			return -EBUSY;
		}

		val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
		if (val != 1) {
			BNX2X_ERR("PXP2 RD_INIT failed\n");
			return -EBUSY;
		}

		REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
		REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

		bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

		bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
		bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
		bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
		bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

#ifdef BNX2X_DMAE_RD
		bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
		bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
		bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
		bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
#else
		REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
		REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
		REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
		REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
		REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
		REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
		REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
		REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
		REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
		REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
		REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
		REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
#endif

		bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
		/* soft reset pulse */
		REG_WR(bp, QM_REG_SOFT_RESET, 1);
		REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
		bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif
		bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
		REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
		if (CHIP_REV(bp) == CHIP_REV_Ax) {
			/* enable hw interrupt from doorbell Q */
			REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
		}

		bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);

		if (CHIP_REV_IS_SLOW(bp)) {
			/* fix for emulation and FPGA for no pause */
			REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
			REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
			REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
			REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
		}

		bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

		bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
		bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
		bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
		bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);

		bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
		bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
		bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
		bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

		/* sync semi rtc */
		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
		       0x80000000);
		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
		       0x80000000);

		bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
		bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
		bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

		REG_WR(bp, SRC_REG_SOFT_RST, 1);
		for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
			REG_WR(bp, i, 0xc0cac01a);
			/* TODO: replace with something meaningful */
		}
		/* SRCH COMMON comes here */
		REG_WR(bp, SRC_REG_SOFT_RST, 0);

		if (sizeof(union cdu_context) != 1024) {
			/* we currently assume that a context is 1024 bytes */
			printk(KERN_ALERT PFX "please adjust the size of"
			       " cdu_context(%ld)\n",
			       (long)sizeof(union cdu_context));
		}
		val = (4 << 24) + (0 << 12) + 1024;
		REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
		bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);

		bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
		REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

		bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
		bnx2x_init_block(bp, MISC_AEU_COMMON_START,
				 MISC_AEU_COMMON_END);
		/* RXPCS COMMON comes here */
		/* EMAC0 COMMON comes here */
		/* EMAC1 COMMON comes here */
		/* DBU COMMON comes here */
		/* DBG COMMON comes here */
		bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);

		if (CHIP_REV_IS_SLOW(bp))
			msleep(200);

		/* finish CFC init */
		val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
		if (val != 1) {
			BNX2X_ERR("CFC LL_INIT failed\n");
			return -EBUSY;
		}

		val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
		if (val != 1) {
			BNX2X_ERR("CFC AC_INIT failed\n");
			return -EBUSY;
		}

		val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
		if (val != 1) {
			BNX2X_ERR("CFC CAM_INIT failed\n");
			return -EBUSY;
		}

		REG_WR(bp, CFC_REG_DEBUG0, 0);

		/* read NIG statistic
		   to see if this is our first up since powerup */
#ifdef BNX2X_DMAE_RD
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
#else
		val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
		REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
#endif
		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem selftest failed\n");
			return -EBUSY;
		}

		/* clear PXP2 attentions */
		REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);

		enable_blocks_attention(bp);
		/* enable_blocks_parity(bp); */

		switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure is indicated by SPIO 5 */
			bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
				       MISC_REGISTERS_SPIO_INPUT_HI_Z);

			/* set to active low mode */
			val = REG_RD(bp, MISC_REG_SPIO_INT);
			val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
			REG_WR(bp, MISC_REG_SPIO_INT, val);

			/* enable interrupt to signal the IGU */
			val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
			val |= (1 << MISC_REGISTERS_SPIO_5);
			REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
			break;

		default:
			break;
		}

	} /* end of common init */
	/* the phys address is shifted right 12 bits and has an added
	   1=valid bit added to the 53rd bit
	   then since this is a wide register(TM)
	   we split it into two 32 bit writes
	 */
#define RQ_ONCHIP_AT_PORT_SIZE	384
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		((x << 10) | x)
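	/* Worked example (illustrative): for a context DMA address
	 * addr = 0x0000001234567000ULL,
	 *
	 *	ONCHIP_ADDR1(addr) = 0x01234567   low 32 bits of (addr >> 12)
	 *	ONCHIP_ADDR2(addr) = 0x00100000   valid bit (1 << 20),
	 *					  (addr >> 44) being 0 here
	 *
	 * so the ILT entry holds the page-shifted address plus the valid
	 * bit, written as two 32-bit halves.
	 */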
3959 DP(BNX2X_MSG_MCP
, "starting per-function init port is %x\n", func
);
3961 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ func
*4, 0);
3963 /* Port PXP comes here */
3964 /* Port PXP2 comes here */
3969 i
= func
* RQ_ONCHIP_AT_PORT_SIZE
;
3971 wb_write
[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp
, context
));
3972 wb_write
[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp
, context
));
3973 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
3975 REG_WR_IND(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8,
3976 ONCHIP_ADDR1(bnx2x_sp_mapping(bp
, context
)));
3977 REG_WR_IND(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8 + 4,
3978 ONCHIP_ADDR2(bnx2x_sp_mapping(bp
, context
)));
3980 REG_WR(bp
, PXP2_REG_PSWRQ_CDU0_L2P
+ func
*4, PXP_ONE_ILT(i
));
3986 wb_write
[0] = ONCHIP_ADDR1(bp
->timers_mapping
);
3987 wb_write
[1] = ONCHIP_ADDR2(bp
->timers_mapping
);
3988 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
3989 REG_WR(bp
, PXP2_REG_PSWRQ_TM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
3994 wb_write
[0] = ONCHIP_ADDR1(bp
->qm_mapping
);
3995 wb_write
[1] = ONCHIP_ADDR2(bp
->qm_mapping
);
3996 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
3997 REG_WR(bp
, PXP2_REG_PSWRQ_QM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
4002 wb_write
[0] = ONCHIP_ADDR1(bp
->t1_mapping
);
4003 wb_write
[1] = ONCHIP_ADDR2(bp
->t1_mapping
);
4004 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
4005 REG_WR(bp
, PXP2_REG_PSWRQ_SRC0_L2P
+ func
*4, PXP_ONE_ILT(i
));
4008 /* Port TCM comes here */
4009 /* Port UCM comes here */
4010 /* Port CCM comes here */
4011 bnx2x_init_block(bp
, func
? XCM_PORT1_START
: XCM_PORT0_START
,
4012 func
? XCM_PORT1_END
: XCM_PORT0_END
);
4018 for (i
= 0; i
< 32; i
++) {
4019 REG_WR(bp
, QM_REG_BASEADDR
+ (func
*32 + i
)*4, 1024 * 4 * i
);
4021 REG_WR_DMAE(bp
, QM_REG_PTRTBL
+ (func
*32 + i
)*8, wb_write
, 2);
4023 REG_WR_IND(bp
, QM_REG_PTRTBL
+ (func
*32 + i
)*8, 0);
4024 REG_WR_IND(bp
, QM_REG_PTRTBL
+ (func
*32 + i
)*8 + 4, 0);
4027 REG_WR(bp
, QM_REG_CONNNUM_0
+ func
*4, 1024/16 - 1);
4029 /* Port QM comes here */
4032 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ func
*4, 1024/64*20);
4033 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ func
*4, 31);
4035 bnx2x_init_block(bp
, func
? TIMERS_PORT1_START
: TIMERS_PORT0_START
,
4036 func
? TIMERS_PORT1_END
: TIMERS_PORT0_END
);
4038 /* Port DQ comes here */
4039 /* Port BRB1 comes here */
4040 /* Port PRS comes here */
4041 /* Port TSDM comes here */
4042 /* Port CSDM comes here */
4043 /* Port USDM comes here */
4044 /* Port XSDM comes here */
4045 bnx2x_init_block(bp
, func
? TSEM_PORT1_START
: TSEM_PORT0_START
,
4046 func
? TSEM_PORT1_END
: TSEM_PORT0_END
);
4047 bnx2x_init_block(bp
, func
? USEM_PORT1_START
: USEM_PORT0_START
,
4048 func
? USEM_PORT1_END
: USEM_PORT0_END
);
4049 bnx2x_init_block(bp
, func
? CSEM_PORT1_START
: CSEM_PORT0_START
,
4050 func
? CSEM_PORT1_END
: CSEM_PORT0_END
);
4051 bnx2x_init_block(bp
, func
? XSEM_PORT1_START
: XSEM_PORT0_START
,
4052 func
? XSEM_PORT1_END
: XSEM_PORT0_END
);
4053 /* Port UPB comes here */
4054 /* Port XSDM comes here */
4055 bnx2x_init_block(bp
, func
? PBF_PORT1_START
: PBF_PORT0_START
,
4056 func
? PBF_PORT1_END
: PBF_PORT0_END
);
4058 /* configure PBF to work without PAUSE mtu 9000 */
4059 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ func
*4, 0);
4061 /* update threshold */
4062 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ func
*4, (9040/16));
4063 /* update init credit */
4064 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ func
*4, (9040/16) + 553 - 22);
4067 REG_WR(bp
, PBF_REG_INIT_P0
+ func
*4, 1);
4069 REG_WR(bp
, PBF_REG_INIT_P0
+ func
*4, 0);
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */

	/* Port CDU comes here */
	/* Port CFC comes here */
	bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
			     func ? HC_PORT1_END : HC_PORT0_END);
	bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
			     func ? NIG_PORT1_END : NIG_PORT0_END);
	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}
	bnx2x__link_reset(bp);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);
	REG_WR(bp, 0x2814, 0xffffffff);

	/* !!! move to init_values.h */
	REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
	REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
	REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
	REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);

	REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
	REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
	REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
	REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);

	bnx2x_gunzip_end(bp);

	if (!nomcp) {
		int port = bp->port;

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  fw_mb 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->fw_mb);
	}

	return 0;
}
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int port = bp->port;
	u32 seq = ++bp->fw_seq;
	u32 rc;

	SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	/* let the FW do its magic ... */
	msleep(100); /* TBD */

	if (CHIP_REV_IS_SLOW(bp))
		msleep(900);

	rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
	DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;
	} else {
		BNX2X_ERR("FW failed to respond!\n");
		rc = 0;
	}

	return rc;
}
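/* The driver/MCP mailbox handshake above works by sequence number: the
 * driver posts (command | seq) to drv_mb_header and the management CPU
 * echoes the same sequence in the low bits of fw_mb_header once it has
 * processed the command. FW_MSG_SEQ_NUMBER_MASK / FW_MSG_CODE_MASK
 * separate the two fields; for illustration (assuming a 16-bit sequence
 * field), a reply of 0x10110005 to seq 5 yields response code
 * 0x10110000.
 */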
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);
	}

	BNX2X_FREE(bp->fp);
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       (sizeof(struct host_def_status_block)));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       (sizeof(struct bnx2x_slowpath)));

	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);

	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
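#undef BNX2X_ALLOC
}

/* The T2 initialization above builds the searcher's free list in place:
 * the last 8 bytes of each 64-byte line hold the physical address of
 * the next line, and the final line is pointed back at t2_mapping so
 * the list forms a ring.
 */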
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		BUG_TRAP(fp->tx_buf_ring != NULL);

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		BUG_TRAP(fp->rx_buf_ring != NULL);

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state(%x)\n", i, bp->msix_table[i + 1].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;
	} else
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i;

	bp->msix_table[0].entry = 0;
	for_each_queue(bp, i)
		bp->msix_table[i + 1].entry = i + 1;

	if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
			    bp->num_queues + 1)) {
		BNX2X_LOG("failed to enable MSI-X\n");
		return -1;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
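/* MSI-X vector layout assumed throughout this driver: entry 0 is the
 * slow path (default status block) interrupt, and entries
 * 1..num_queues map one-to-one onto the fast path queues, which is why
 * the table is sized num_queues + 1.
 */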
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + 1].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  "
				  "rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
			     IRQF_SHARED, bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = bp->port ? 31 : 0;
	config->hdr.reserved0 = 0;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
	config->config_table[0].target_table_entry.flags = 0;
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
	config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
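/* The swab16() calls above exist because the CAM stores each 16-bit
 * half of the MAC address with its bytes swapped relative to the
 * dev_addr byte array; on a little-endian host, a MAC beginning
 * 00:1b:... loads as msb_mac_addr = 0x001b after the swap.
 */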
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int timeout = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();

	while (timeout) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* If index is different from 0
			 * the reply for some commands will
			 * be on the non-default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */

		if (*state_p == state)
			return 0;

		timeout--;
		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);

	return -EBUSY;
}
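/* Completion here is state-based rather than event-based: the ramrod's
 * completion handler (bnx2x_sp_event()) updates *state_p from interrupt
 * or polled Rx context, and this helper just watches that location,
 * with mb() keeping the read ordered against the updater.
 */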
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);
static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
{
	u32 load_code;
	int i;

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Send LOAD_REQUEST command to MCP.
	   Returns the type of LOAD command: if it is the
	   first port to be initialized common blocks should be
	   initialized, otherwise - not.
	 */
	if (!nomcp) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, unloading\n");
			return -EBUSY;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			BNX2X_ERR("MCP refused load request, unloading\n");
			return -EBUSY; /* other port in diagnostic mode */
		}
	} else
		load_code = FW_MSG_CODE_DRV_LOAD_COMMON;

	/* if we can't use msix we only need one fp,
	 * so try to enable msix with the requested number of fp's
	 * and fallback to inta with one fp
	 */
	if ((use_multi > 1) && (use_multi <= 16))
		/* user requested number */
		bp->num_queues = use_multi;
	else if (use_multi == 1)
		bp->num_queues = num_online_cpus();
	else
		bp->num_queues = 1;

	if (bnx2x_enable_msix(bp)) {
		/* failed to enable msix */
		bp->num_queues = 1;
		if (use_multi)
			BNX2X_ERR("Multi requested but failed"
				  " to enable MSI-X\n");
	}

	DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	if (bp->flags & USING_MSIX_FLAG) {
		if (bnx2x_req_msix_irqs(bp)) {
			pci_disable_msix(bp->pdev);
			goto load_error;
		}
	} else {
		if (bnx2x_req_irq(bp)) {
			BNX2X_ERR("IRQ request failed, aborting\n");
			goto load_error;
		}
	}

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	/* Initialize HW */
	if (bnx2x_function_init(bp,
				(load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error;
	}

	atomic_set(&bp->intr_sem, 0);

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp);

	/* Send LOAD_DONE command to MCP */
	if (!nomcp) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, unloading\n");
			goto load_int_disable;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	/* Enable Rx interrupt handling before sending the ramrod
	   as it's completed on Rx FP queue */
	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));

	if (bnx2x_setup_leading(bp))
		goto load_stop_netif;

	for_each_nondefault_queue(bp, i)
		if (bnx2x_setup_multi(bp, i))
			goto load_stop_netif;

	bnx2x_set_mac_addr(bp);

	bnx2x_initial_phy_init(bp);

	/* Start fast path */
	if (req_irq) { /* IRQ is only requested from bnx2x_open */
		netif_start_queue(bp->dev);
		if (bp->flags & USING_MSIX_FLAG)
			printk(KERN_INFO PFX "%s: using MSI-X\n",
			       bp->dev->name);

	/* Otherwise Tx queue should be only reenabled */
	} else if (netif_running(bp->dev)) {
		netif_wake_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
	}

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_stop_netif:
	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));

load_int_disable:
	bnx2x_int_disable_sync(bp);

	bnx2x_free_skbs(bp);
	bnx2x_free_irq(bp);

load_error:
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return -EBUSY;
}
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	int port = bp->port;
	u32 wb_write[2];
	u32 base, i;

	DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure IGU and AEU */
	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* TODO: Close Doorbell port? */

	/* Clear ILT */
	wb_write[0] = 0;
	wb_write[1] = 0;
	base = port * RQ_ONCHIP_AT_PORT_SIZE;
	for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
		REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
		REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
		REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
	}

	if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
		/* reset the chip */
		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
		       0xd3ffff7f);
		REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
		       0x1403);
	}
}
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);

	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
				 &(bp->fp[index].state), 1);
}

static void bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int timeout = 500;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);

	if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			      &(bp->fp[0].state), 1))
		return;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
		timeout--;
		msleep(1);
	}
	if (!timeout)
		DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
		   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
		   *bp->dsb_sp_prod, dsb_sp_prod_idx);

	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
}
static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
{
	u32 reset_code = 0;
	int i, timeout;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	del_timer_sync(&bp->timer);

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	if (netif_running(bp->dev)) {
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}

	/* Wait until all fast path tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		timeout = 1000;
		while (bnx2x_has_work(fp) && (timeout--))
			msleep(1);
		if (!timeout)
			BNX2X_ERR("timeout waiting for queue[%d]\n", i);
	}

	/* Wait until stat ramrod returns and all SP tasks complete */
	timeout = 1000;
	while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
	       (timeout--))
		msleep(1);

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
	/* Disable interrupts after Tx and Rx are disabled on stack level */
	bnx2x_int_disable_sync(bp);

	if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
			   EMAC_MODE_ACPI_RCVD);

		EMAC_WR(EMAC_REG_EMAC_MODE, val);

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	bnx2x_stop_leading(bp);
	if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
	    (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
		DP(NETIF_MSG_IFDOWN, "failed to close leading properly!  "
		   "state 0x%x  fp[0].state 0x%x\n",
		   bp->state, bp->fp[0].state);
	}

unload_error:
	bnx2x__link_reset(bp);

	if (!nomcp)
		reset_code = bnx2x_fw_command(bp, reset_code);
	else
		reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;

	/* Release IRQs */
	if (free_irq)
		bnx2x_free_irq(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!nomcp)
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs and driver internals */
	bnx2x_free_skbs(bp);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

/* end of nic load/unload */

/*
 * Init service functions
 */
static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
{
	int port = bp->port;
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_TP | SUPPORTED_FIBRE |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_TP | SUPPORTED_FIBRE |
					  SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
				      port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10baseT_Half |
					  SUPPORTED_10baseT_Full |
					  SUPPORTED_100baseT_Half |
					  SUPPORTED_100baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_10000baseT_Full |
					  SUPPORTED_TP | SUPPORTED_FIBRE |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_2500baseX_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->supported |= (SUPPORTED_10000baseT_Full |
					  SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
				      port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->link_config);
		return;
	}

	bp->link_params.phy_addr = bp->phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->supported &= ~(SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
}
static void bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->advertising = bp->supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->advertising = (ADVERTISED_10baseT_Full |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->advertising = (ADVERTISED_10baseT_Half |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->advertising = (ADVERTISED_100baseT_Full |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->advertising = (ADVERTISED_100baseT_Half |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->advertising = (ADVERTISED_1000baseT_Full |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->advertising = (ADVERTISED_2500baseX_Full |
					   ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->advertising = (ADVERTISED_10000baseT_Full |
					   ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->advertising = bp->supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
	    !(bp->supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->advertising);
}
static void bnx2x_get_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	int port = bp->port;

	bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->chip_id = id;
	BNX2X_DEV_INFO("chip ID is %x\n", id);

	bp->link_params.bp = bp;

	if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
		BNX2X_DEV_INFO("MCP not active\n");
		nomcp = 1;
		goto set_mac;
	}

	val = SHMEM_RD(bp, validity_map[port]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
		      DRV_MSG_SEQ_NUMBER_MASK);

	bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config (%08x)  lane_config (%08x)\n"
	   KERN_INFO "  ext_phy_config (%08x)  speed_cap_mask (%08x)"
		       "  link_config (%08x)\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->link_config);

	bp->link_params.switch_cfg = (bp->link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);

	bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->flash_size, bp->flash_size);

	return;

set_mac: /* only supposed to happen on emulation/FPGA */
	BNX2X_ERR("warning: random MAC workaround active\n");
	random_ether_addr(bp->dev->dev_addr);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
}
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->supported;
	cmd->advertising = bp->advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->advertising);

	if (netif_running(dev)) {
		bnx2x_stop_stats(bp);
		bnx2x_link_set(bp);
	}

	return 0;
}
#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	char phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	bnx2x_phy_hw_lock(bp);
	bnx2x_get_ext_phy_fw_version(&bp->link_params,
				     (bp->state != BNX2X_STATE_CLOSED),
				     phy_fw_ver, PHY_FW_VER_LEN);
	bnx2x_phy_hw_unlock(bp);

	snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
		 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
		 BCM_5710_FW_REVISION_VERSION,
		 BCM_5710_FW_COMPILE_FLAGS, bp->bc_ver,
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->flash_size;
	info->regdump_len = 0;
}
*dev
, struct ethtool_wolinfo
*wol
)
5743 struct bnx2x
*bp
= netdev_priv(dev
);
5745 if (bp
->flags
& NO_WOL_FLAG
) {
5749 wol
->supported
= WAKE_MAGIC
;
5751 wol
->wolopts
= WAKE_MAGIC
;
5755 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
5758 static int bnx2x_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
5760 struct bnx2x
*bp
= netdev_priv(dev
);
5762 if (wol
->wolopts
& ~WAKE_MAGIC
)
5765 if (wol
->wolopts
& WAKE_MAGIC
) {
5766 if (bp
->flags
& NO_WOL_FLAG
)
5776 static u32
bnx2x_get_msglevel(struct net_device
*dev
)
5778 struct bnx2x
*bp
= netdev_priv(dev
);
5780 return bp
->msglevel
;
5783 static void bnx2x_set_msglevel(struct net_device
*dev
, u32 level
)
5785 struct bnx2x
*bp
= netdev_priv(dev
);
5787 if (capable(CAP_NET_ADMIN
))
5788 bp
->msglevel
= level
;
static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
		return -EAGAIN;
	}

	bnx2x_stop_stats(bp);
	bnx2x_link_set(bp);

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->flash_size;
}
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = bp->port;
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = bp->port;
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(NETIF_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->flash_size) {
		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
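/* Read sequencing above: MCPR_NVM_COMMAND_FIRST is set only for the
 * first dword and MCPR_NVM_COMMAND_LAST only for the final one, which
 * lets the NVRAM controller treat the whole transfer as one burst
 * instead of independent single-dword accesses.
 */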
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->flash_size) {
		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		DP(NETIF_MSG_NVM, "val 0x%08x\n", val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
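/* Single-byte writes are implemented above as a read-modify-write of
 * the aligned dword: BYTE_OFFSET() converts the byte position into a
 * bit shift, the old byte is masked out and the new one OR-ed in, and
 * the dword is written back with FIRST|LAST set since it is a
 * one-dword burst.
 */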
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1) {	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
	}

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(NETIF_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->flash_size) {
		DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);
		DP(NETIF_MSG_NVM, "val 0x%08x\n", val);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859) {

		bnx2x_phy_hw_lock(bp);
		rc = bnx2x_flash_download(bp, bp->port,
					  bp->link_params.ext_phy_config,
					  (bp->state != BNX2X_STATE_CLOSED),
					  eebuf, eeprom->len);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars);
		rc |= bnx2x_phy_init(&bp->link_params,
				     &bp->link_vars);
		bnx2x_phy_hw_unlock(bp);

	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf,
				       eeprom->len);

	return rc;
}
static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00)
		bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	if (netif_running(bp->dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		bnx2x_nic_unload(bp, 0);
		bnx2x_nic_load(bp, 0);
	}

	return 0;
}
*dev
,
6282 struct ethtool_pauseparam
*epause
)
6284 struct bnx2x
*bp
= netdev_priv(dev
);
6286 epause
->autoneg
= (bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
) &&
6287 (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
);
6289 epause
->rx_pause
= ((bp
->link_vars
.flow_ctrl
& FLOW_CTRL_RX
) ==
6291 epause
->tx_pause
= ((bp
->link_vars
.flow_ctrl
& FLOW_CTRL_TX
) ==
6294 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
6295 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
6296 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
6299 static int bnx2x_set_pauseparam(struct net_device
*dev
,
6300 struct ethtool_pauseparam
*epause
)
6302 struct bnx2x
*bp
= netdev_priv(dev
);
6304 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
6305 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
6306 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
6308 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_AUTO
;
6310 if (epause
->rx_pause
)
6311 bp
->link_params
.req_flow_ctrl
|= FLOW_CTRL_RX
;
6313 if (epause
->tx_pause
)
6314 bp
->link_params
.req_flow_ctrl
|= FLOW_CTRL_TX
;
6316 if (bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
)
6317 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_NONE
;
6319 if (epause
->autoneg
) {
6320 if (!(bp
->supported
& SUPPORTED_Autoneg
)) {
6321 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
6325 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
6326 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_AUTO
;
6330 "req_flow_ctrl 0x%x\n", bp
->link_params
.req_flow_ctrl
);
6331 bnx2x_stop_stats(bp
);
static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "MC Errors  (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int stats_state;

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
		return;
	}

	stats_state = bp->stats_state;
	bnx2x_stop_stats(bp);

	if (bnx2x_mc_assert(bp) != 0) {
		buf[0] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
	bp->stats_state = stats_state;
}
6400 char string
[ETH_GSTRING_LEN
];
6401 } bnx2x_stats_str_arr
[BNX2X_NUM_STATS
] = {
6403 { "rx_error_bytes"},
6405 { "tx_error_bytes"},
6406 { "rx_ucast_packets"},
6407 { "rx_mcast_packets"},
6408 { "rx_bcast_packets"},
6409 { "tx_ucast_packets"},
6410 { "tx_mcast_packets"},
6411 { "tx_bcast_packets"},
6412 { "tx_mac_errors"}, /* 10 */
6413 { "tx_carrier_errors"},
6415 { "rx_align_errors"},
6416 { "tx_single_collisions"},
6417 { "tx_multi_collisions"},
6419 { "tx_excess_collisions"},
6420 { "tx_late_collisions"},
6421 { "tx_total_collisions"},
6422 { "rx_fragments"}, /* 20 */
6424 { "rx_undersize_packets"},
6425 { "rx_oversize_packets"},
6427 { "rx_xoff_frames"},
6429 { "tx_xoff_frames"},
6430 { "rx_mac_ctrl_frames"},
6431 { "rx_filtered_packets"},
6432 { "rx_discards"}, /* 30 */
#define STATS_OFFSET32(offset_name) \
	(offsetof(struct bnx2x_eth_stats, offset_name) / 4)

static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
	STATS_OFFSET32(total_bytes_received_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(total_bytes_transmitted_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(total_unicast_packets_received_hi),
	STATS_OFFSET32(total_multicast_packets_received_hi),
	STATS_OFFSET32(total_broadcast_packets_received_hi),
	STATS_OFFSET32(total_unicast_packets_transmitted_hi),
	STATS_OFFSET32(total_multicast_packets_transmitted_hi),
	STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
	STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors),	/* 10 */
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(crc_receive_errors),
	STATS_OFFSET32(alignment_errors),
	STATS_OFFSET32(single_collision_transmit_frames),
	STATS_OFFSET32(multiple_collision_transmit_frames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(excessive_collision_frames),
	STATS_OFFSET32(late_collision_frames),
	STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
	STATS_OFFSET32(runt_packets_received),	/* 20 */
	STATS_OFFSET32(jabber_packets_received),
	STATS_OFFSET32(error_runt_packets_received),
	STATS_OFFSET32(error_jabber_packets_received),
	STATS_OFFSET32(pause_xon_frames_received),
	STATS_OFFSET32(pause_xoff_frames_received),
	STATS_OFFSET32(pause_xon_frames_transmitted),
	STATS_OFFSET32(pause_xoff_frames_transmitted),
	STATS_OFFSET32(control_frames_received),
	STATS_OFFSET32(mac_filter_discard),
	STATS_OFFSET32(no_buff_discard),	/* 30 */
	STATS_OFFSET32(brb_discard),
	STATS_OFFSET32(brb_truncate_discard),
	STATS_OFFSET32(xxoverflow_discard)
};

static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
	8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
	4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
	4, 4, 4, 4
};

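/* The three arrays above are parallel: for a given index i,
 * bnx2x_stats_str_arr[i] is the name reported by ethtool -S,
 * bnx2x_stats_offset_arr[i] is the counter's position inside
 * struct bnx2x_eth_stats measured in 32-bit words (hence the division
 * by 4 in STATS_OFFSET32), and bnx2x_stats_len_arr[i] is the counter
 * width in bytes: 8 for a hi/lo pair of u32s, 4 for a single u32 and
 * 0 for counters that are not exported.
 */
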
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	return BNX2X_NUM_STATS;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
	int i;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (bnx2x_stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (bnx2x_stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
				  *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
	}
}

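/* For the 8-byte counters the _hi word sits at the stats offset and the
 * _lo word immediately follows it (note the "+ 1" above), so HILO_U64()
 * reassembles the two u32s into one u64; 4-byte counters are simply
 * zero-extended into the u64 output slot.
 */
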
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, bp->port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, bp->port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, bp->port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

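/* ethtool -p semantics: 'data' is the requested blink time in seconds.
 * Each loop iteration toggles the LED and sleeps 500ms, so data * 2
 * iterations blink for roughly 'data' seconds, after which the LED is
 * put back into operational mode if the link is up.
 */
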
static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
 * General service functions
 ****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev,
				      bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

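/* The PMCSR handling above relies on standard PCI power management
 * semantics: the low two bits of PCI_PM_CTRL select the power state
 * (00 = D0, 11 = D3hot), PCI_PM_CTRL_PME_STATUS is write-one-to-clear,
 * and a device leaving D3hot needs a short settle delay before its
 * config and memory spaces may be touched again.
 */
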
/****************************************************************************
 * net_device service functions
 ****************************************************************************/

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	DP(NETIF_MSG_IFUP, "called  dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 (dev->mc_count > BNX2X_MAX_MULTICAST))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		int i, old, offset;
		struct dev_mc_list *mclist;
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0, mclist = dev->mc_list;
		     mclist && (i < dev->mc_count);
		     i++, mclist = mclist->next) {

			config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
			config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
			config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
			config->config_table[i].cam_entry.flags =
					cpu_to_le16(bp->port);
			config->config_table[i].target_table_entry.flags = 0;
			config->config_table[i].target_table_entry.
							client_id = 0;
			config->config_table[i].target_table_entry.
							vlan_id = 0;

			DP(NETIF_MSG_IFUP,
			   "setting MCAST[%d] (%04x:%04x:%04x)\n",
			   i, config->config_table[i].cam_entry.msb_mac_addr,
			   config->config_table[i].cam_entry.middle_mac_addr,
			   config->config_table[i].cam_entry.lsb_mac_addr);
		}

		old = config->hdr.length_6b;
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config->config_table[i])) {
				i--; /* already invalidated */
				break;
			}
			/* invalidate */
			CAM_INVALIDATE(config->config_table[i]);
		}

		if (CHIP_REV_IS_SLOW(bp))
			offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
		else
			offset = BNX2X_MAX_MULTICAST*(1 + bp->port);

		config->hdr.length_6b = i;
		config->hdr.offset = offset;
		config->hdr.reserved0 = 0;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

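/* Each multicast MAC is stored in the CAM as three 16-bit words
 * (msb/middle/lsb); the swab16() of each u16 of dmi_addr puts the wire
 * bytes into the word order the CAM entry fields use.  The length_6b and
 * offset header fields then tell the firmware how many entries are valid
 * and where this port's window into the shared CAM begins, and the
 * SET_MAC ramrod posted above applies the table on the slow path.
 */
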
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
		bnx2x_tx_int(fp, budget);

	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* bnx2x_has_work() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !bnx2x_has_work(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, fp->index, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return work_done;
}

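/* Poll-loop ordering notes: the rmb() keeps the status block reads done
 * by bnx2x_has_work() from being reordered before the TX/RX processing
 * above; NAPI is completed only when less than the full budget was
 * consumed and no new work is pending, preserving the core's assumption
 * that a full-budget poll stays scheduled; and the IGU acks re-enable
 * the interrupt only on that exit path.
 */
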
/* Called with netif_tx_lock.
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	dma_addr_t mapping;
	int nbd, fp_index = 0;
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = smp_processor_id() % (bp->num_queues);

	fp = &bp->fp[fp_index];
	if (unlikely(bnx2x_tx_avail(bp->fp) <
					(skb_shinfo(skb)->nr_frags + 3))) {
		bp->slowpath->eth_stats.driver_xoff++,
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/*
	This is a bit ugly. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO bds.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	I would like to thank DovH for this mess.
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = fp->tx_bd_prod;
	bd_prod = TX_BD(bd_prod);

	/* get a tx_buff and first bd */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	tx_bd->general_data |= 1; /* header nbd */

	/* remember the first bd of the packet */
	tx_buf->first_bd = bd_prod;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

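	/* The resulting BD chain for one packet is:
	 *   start BD (skb header data)
	 *     -> optional parse BD (checksum/TSO info)
	 *     -> optional TSO-split data BD
	 *     -> one data BD per page fragment
	 * general_data's low bits hold the header-BD count the firmware
	 * consumes before the packet data proper (set to 1 above).
	 */
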
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		struct iphdr *iph = ip_hdr(skb);
		u8 len;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;

		/* turn on parsing and get a bd */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];
		len = ((u8 *)iph - (u8 *)skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (len |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
		pbd->ip_hlen = ip_hdrlen(skb) / 2;
		pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
		if (iph->protocol == IPPROTO_TCP) {
			struct tcphdr *th = tcp_hdr(skb);

			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_TCP_CSUM;
			pbd->tcp_flags = pbd_tcp_flags(skb);
			pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
			pbd->tcp_pseudo_csum = swab16(th->check);

		} else if (iph->protocol == IPPROTO_UDP) {
			struct udphdr *uh = udp_hdr(skb);

			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_TCP_CSUM;
			pbd->total_hlen += cpu_to_le16(4);
			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = 5; /* 10 >> 1 */
			pbd->tcp_pseudo_csum = 0;
			/* HW bug: we need to subtract 10 bytes before the
			 * UDP header from the csum
			 */
			uh->check = (u16) ~csum_fold(csum_sub(uh->check,
					csum_partial(((u8 *)(uh)-10), 10, 0)));
		}
	}

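	/* Unpacking the workaround above: the hardware checksums a region
	 * starting 10 bytes before the UDP header (hence cs_offset = 5,
	 * counted in 16-bit words), so the driver pre-compensates in the
	 * packet itself.  csum_partial() over those 10 bytes yields their
	 * contribution, csum_sub() removes it from the existing checksum,
	 * and ~csum_fold() turns the 32-bit partial sum back into the
	 * 16-bit one's-complement field the header carries.
	 */
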
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else {
		tx_bd->vlan = cpu_to_le16(pkt_prod);
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %u\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
	   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);

	if (skb_shinfo(skb)->gso_size &&
	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
		int hlen = 2 * le16_to_cpu(pbd->total_hlen);

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (tx_bd->nbytes > cpu_to_le16(hlen)) {
			/* we split the first bd into headers and data bds
			 * to ease the pain of our fellow microcode engineers
			 * we use one mapping for both bds
			 * So far this has only been observed to happen
			 * in Other Operating Systems(TM)
			 */

			/* first fix first bd */
			nbd++;
			tx_bd->nbd = cpu_to_le16(nbd);
			tx_bd->nbytes = cpu_to_le16(hlen);

			/* we only print this as an error
			 * because we don't think this will ever happen.
			 */
			BNX2X_ERR("TSO split header size is %d (%x:%x)"
				  "  nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
				  tx_bd->addr_lo, tx_bd->nbd);

			/* now get a new data bd
			 * (after the pbd) and fill it */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
			tx_bd = &fp->tx_desc_ring[bd_prod];

			tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
			tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
			tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
			tx_bd->vlan = cpu_to_le16(pkt_prod);
			/* this marks the bd
			 * as one that has no individual mapping
			 * the FW ignores this flag in a bd not marked start
			 */
			tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
			DP(NETIF_MSG_TX_QUEUED,
			   "TSO split data size is %d (%x:%x)\n",
			   tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
		}

		if (!pbd) {
			/* supposed to be unreached
			 * (and therefore not handled properly...)
			 */
			BNX2X_ERR("LSO with no PBD\n");
		}

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));
		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

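	/* The parse BD gives the firmware everything it needs to rewrite
	 * per-segment headers: the starting TCP sequence number, the IP ID
	 * and a pseudo-header checksum from csum_tcpudp_magic() computed
	 * with a zero length, which is what the PSEUDO_CS_WITHOUT_LEN flag
	 * name suggests (each segment's real length is presumably added by
	 * the firmware when it fixes up the checksum).  The swab16()/
	 * swab32() calls present these fields in the byte order the
	 * firmware expects.
	 */
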
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;
		DP(NETIF_MSG_TX_QUEUED, "frag %d  bd @%p"
		   "  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the bd as the last bd */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	tx_buf->skb = skb;

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next bd
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, pbd->total_hlen);

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u  bd %d\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, fp_index, 0);

	fp->tx_bd_prod = bd_prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->slowpath->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}

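/* Producer accounting in bnx2x_start_xmit(): nbd counts every BD the
 * packet occupies (start BD, optional parse BD, optional TSO-split data
 * BD, one per page fragment), and the TX_BD_POFF() check above bumps it
 * once more when the chain reaches the end of a BD page, per the "next
 * bd" comment.  bds_prod and packets_prod are the producer values the
 * firmware reads, and the mb() enforces the write order the firmware
 * requires before the doorbell is rung.
 */
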
/* Called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, 1);
}

/* Called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, 1);

	if (!CHIP_REV_IS_SLOW(bp))
		bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* Called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_mac_addr(bp);

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fall through */

	case SIOCGMIIREG: {
		u32 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->phy_mutex);
		err = bnx2x_cl45_read(bp, bp->port, 0, bp->phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->phy_mutex);
		err = bnx2x_cl45_write(bp, bp->port, 0, bp->phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->phy_mutex);
		return err;

	default:
		break;
	}

	return -EOPNOTSUPP;
}

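/* The MII ioctls above reach the 10G PHY through clause-45 MDIO
 * (bnx2x_cl45_read/write) at DEFAULT_PHY_DEV_ADDR; reg_num is masked to
 * 5 bits because that is all the legacy SIOCxMIIREG interface carries.
 * The phy_mutex serializes these accesses against the link code.
 */
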
/* Called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, 0);
		bnx2x_nic_load(bp, 0);
	}
	return 0;
}

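/* Changing the MTU on a running interface is implemented as a full
 * unload/load cycle because, as the comment above notes, the RX buffer
 * allocation size is derived from the MTU only at load time.
 */
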
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

/* Called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	if (!netif_running(bp->dev))
		return;

	rtnl_lock();

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
		goto reset_task_exit;
	}

	bnx2x_nic_unload(bp, 0);
	bnx2x_nic_load(bp, 0);

reset_task_exit:
	rtnl_unlock();
}

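/* The reset task recovers from a TX timeout by bouncing the NIC with an
 * unload/load pair; it bails out early when the device is not up or not
 * in the OPEN state, since resetting would then race with configuration
 * paths that are in the middle of changing that state.
 */
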
static int __devinit bnx2x_init_board(struct pci_dev *pdev,
				      struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->port = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources,"
		       " aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
	INIT_WORK(&bp->sp_task, bnx2x_sp_task);

	dev->base_addr = pci_resource_start(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					pci_resource_len(pdev, 2));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	bnx2x_get_hwinfo(bp);

	if (nomcp) {
		printk(KERN_ERR PFX "MCP disabled, will only"
		       " init first device\n");
		onefunc = 1;
	}

	if (onefunc && bp->port) {
		printk(KERN_ERR PFX "Second device disabled, exiting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->tx_quick_cons_trip_int = 0xff;
	bp->tx_quick_cons_trip = 0xff;
	bp->tx_ticks_int = 50;
	bp->tx_ticks = 50;

	bp->rx_quick_cons_trip_int = 0xff;
	bp->rx_quick_cons_trip = 0xff;
	bp->rx_ticks_int = 25;
	bp->rx_ticks = 25;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = (poll ? poll : HZ);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

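/* The error labels above unwind in reverse order of acquisition: unmap
 * whichever BARs were mapped, release the PCI regions, then disable the
 * device, so each goto entry point cleans up exactly what had succeeded
 * by the time it was taken.
 */
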
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

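/* Both helpers read the PCIe link control/status word through the chip's
 * PCICFG register window (REG_RD) rather than via config space, and
 * decode the negotiated link width (in lanes) and speed (1 = 2.5GT/s,
 * 2 = 5GT/s, matching the standard PCIe link status encoding) for the
 * probe banner printed below.
 */
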
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;
	int port = PCI_FUNC(pdev->devfn);
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	netif_carrier_off(dev);

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	if (port && onefunc) {
		printk(KERN_ERR PFX "second function disabled. exiting\n");
		free_netdev(dev);
		return 0;
	}

	rc = bnx2x_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		if (bp->doorbells)
			iounmap(bp->doorbells);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
	return 0;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		/* we get here if init_one() fails */
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}

	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}

	if (!netif_running(dev))
		return 0;

	bp = netdev_priv(dev);

	bnx2x_nic_unload(bp, 0);

	netif_device_detach(dev);

	pci_save_state(pdev);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}

	if (!netif_running(dev))
		return 0;

	bp = netdev_priv(dev);

	pci_restore_state(pdev);
	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, 0);

	return rc;
}

static struct pci_driver bnx2x_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2x_pci_tbl,
	.probe		= bnx2x_init_one,
	.remove		= __devexit_p(bnx2x_remove_one),
	.suspend	= bnx2x_suspend,
	.resume		= bnx2x_resume,
};

static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);