1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2008 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
46 #include <net/checksum.h>
47 #include <linux/version.h>
48 #include <net/ip6_checksum.h>
49 #include <linux/workqueue.h>
50 #include <linux/crc32.h>
51 #include <linux/crc32c.h>
52 #include <linux/prefetch.h>
53 #include <linux/zlib.h>
56 #include "bnx2x_reg.h"
57 #include "bnx2x_fw_defs.h"
58 #include "bnx2x_hsi.h"
59 #include "bnx2x_link.h"
61 #include "bnx2x_init.h"
63 #define DRV_MODULE_VERSION "1.45.6"
64 #define DRV_MODULE_RELDATE "2008/06/23"
65 #define BNX2X_BC_VER 0x040200
67 /* Time in jiffies before concluding the transmitter is hung */
68 #define TX_TIMEOUT (5*HZ)
70 static char version
[] __devinitdata
=
71 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
72 DRV_MODULE_NAME
" " DRV_MODULE_VERSION
" (" DRV_MODULE_RELDATE
")\n";
74 MODULE_AUTHOR("Eliezer Tamir");
75 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
76 MODULE_LICENSE("GPL");
77 MODULE_VERSION(DRV_MODULE_VERSION
);
79 static int disable_tpa
;
83 static int load_count
[3]; /* 0-common, 1-port0, 2-port1 */
86 module_param(disable_tpa
, int, 0);
87 module_param(use_inta
, int, 0);
88 module_param(poll
, int, 0);
89 module_param(debug
, int, 0);
90 MODULE_PARM_DESC(disable_tpa
, "disable the TPA (LRO) feature");
91 MODULE_PARM_DESC(use_inta
, "use INT#A instead of MSI-X");
92 MODULE_PARM_DESC(poll
, "use polling (for debug)");
93 MODULE_PARM_DESC(debug
, "default debug msglevel");
96 module_param(use_multi
, int, 0);
97 MODULE_PARM_DESC(use_multi
, "use per-CPU queues");
100 enum bnx2x_board_type
{
106 /* indexed by board_type, above */
109 } board_info
[] __devinitdata
= {
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
116 static const struct pci_device_id bnx2x_pci_tbl
[] = {
117 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_57710
,
118 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM57710
},
119 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_57711
,
120 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM57711
},
121 { PCI_VENDOR_ID_BROADCOM
, PCI_DEVICE_ID_NX2_57711E
,
122 PCI_ANY_ID
, PCI_ANY_ID
, 0, 0, BCM57711E
},
126 MODULE_DEVICE_TABLE(pci
, bnx2x_pci_tbl
);
128 /****************************************************************************
129 * General service functions
130 ****************************************************************************/
133 * locking is done by mcp
135 static void bnx2x_reg_wr_ind(struct bnx2x
*bp
, u32 addr
, u32 val
)
137 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
, addr
);
138 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_DATA
, val
);
139 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
140 PCICFG_VENDOR_ID_OFFSET
);
143 static u32
bnx2x_reg_rd_ind(struct bnx2x
*bp
, u32 addr
)
147 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
, addr
);
148 pci_read_config_dword(bp
->pdev
, PCICFG_GRC_DATA
, &val
);
149 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
150 PCICFG_VENDOR_ID_OFFSET
);
155 static const u32 dmae_reg_go_c
[] = {
156 DMAE_REG_GO_C0
, DMAE_REG_GO_C1
, DMAE_REG_GO_C2
, DMAE_REG_GO_C3
,
157 DMAE_REG_GO_C4
, DMAE_REG_GO_C5
, DMAE_REG_GO_C6
, DMAE_REG_GO_C7
,
158 DMAE_REG_GO_C8
, DMAE_REG_GO_C9
, DMAE_REG_GO_C10
, DMAE_REG_GO_C11
,
159 DMAE_REG_GO_C12
, DMAE_REG_GO_C13
, DMAE_REG_GO_C14
, DMAE_REG_GO_C15
162 /* copy command into DMAE command memory and set DMAE command go */
163 static void bnx2x_post_dmae(struct bnx2x
*bp
, struct dmae_command
*dmae
,
169 cmd_offset
= (DMAE_REG_CMD_MEM
+ sizeof(struct dmae_command
) * idx
);
170 for (i
= 0; i
< (sizeof(struct dmae_command
)/4); i
++) {
171 REG_WR(bp
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
173 DP(BNX2X_MSG_OFF
, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx
, i
, cmd_offset
+ i
*4, *(((u32
*)dmae
) + i
));
176 REG_WR(bp
, dmae_reg_go_c
[idx
], 1);
179 void bnx2x_write_dmae(struct bnx2x
*bp
, dma_addr_t dma_addr
, u32 dst_addr
,
182 struct dmae_command
*dmae
= &bp
->init_dmae
;
183 u32
*wb_comp
= bnx2x_sp(bp
, wb_comp
);
186 if (!bp
->dmae_ready
) {
187 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
189 DP(BNX2X_MSG_OFF
, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr
, len32
);
191 bnx2x_init_ind_wr(bp
, dst_addr
, data
, len32
);
195 mutex_lock(&bp
->dmae_mutex
);
197 memset(dmae
, 0, sizeof(struct dmae_command
));
199 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
200 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
201 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
203 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
205 DMAE_CMD_ENDIANITY_DW_SWAP
|
207 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
208 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
209 dmae
->src_addr_lo
= U64_LO(dma_addr
);
210 dmae
->src_addr_hi
= U64_HI(dma_addr
);
211 dmae
->dst_addr_lo
= dst_addr
>> 2;
212 dmae
->dst_addr_hi
= 0;
214 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_comp
));
215 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_comp
));
216 dmae
->comp_val
= DMAE_COMP_VAL
;
218 DP(BNX2X_MSG_OFF
, "dmae: opcode 0x%08x\n"
219 DP_LEVEL
"src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
223 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
, dst_addr
,
224 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
, dmae
->comp_val
);
225 DP(BNX2X_MSG_OFF
, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
227 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
231 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
235 while (*wb_comp
!= DMAE_COMP_VAL
) {
236 DP(BNX2X_MSG_OFF
, "wb_comp 0x%08x\n", *wb_comp
);
238 /* adjust delay for emulation/FPGA */
239 if (CHIP_REV_IS_SLOW(bp
))
245 BNX2X_ERR("dmae timeout!\n");
251 mutex_unlock(&bp
->dmae_mutex
);
254 void bnx2x_read_dmae(struct bnx2x
*bp
, u32 src_addr
, u32 len32
)
256 struct dmae_command
*dmae
= &bp
->init_dmae
;
257 u32
*wb_comp
= bnx2x_sp(bp
, wb_comp
);
260 if (!bp
->dmae_ready
) {
261 u32
*data
= bnx2x_sp(bp
, wb_data
[0]);
264 DP(BNX2X_MSG_OFF
, "DMAE is not ready (src_addr %08x len32 %d)"
265 " using indirect\n", src_addr
, len32
);
266 for (i
= 0; i
< len32
; i
++)
267 data
[i
] = bnx2x_reg_rd_ind(bp
, src_addr
+ i
*4);
271 mutex_lock(&bp
->dmae_mutex
);
273 memset(bnx2x_sp(bp
, wb_data
[0]), 0, sizeof(u32
) * 4);
274 memset(dmae
, 0, sizeof(struct dmae_command
));
276 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
277 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
278 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
280 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
282 DMAE_CMD_ENDIANITY_DW_SWAP
|
284 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
285 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
286 dmae
->src_addr_lo
= src_addr
>> 2;
287 dmae
->src_addr_hi
= 0;
288 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_data
));
289 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_data
));
291 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, wb_comp
));
292 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, wb_comp
));
293 dmae
->comp_val
= DMAE_COMP_VAL
;
295 DP(BNX2X_MSG_OFF
, "dmae: opcode 0x%08x\n"
296 DP_LEVEL
"src_addr [%x:%08x] len [%d *4] "
297 "dst_addr [%x:%08x (%08x)]\n"
298 DP_LEVEL
"comp_addr [%x:%08x] comp_val 0x%08x\n",
299 dmae
->opcode
, dmae
->src_addr_hi
, dmae
->src_addr_lo
,
300 dmae
->len
, dmae
->dst_addr_hi
, dmae
->dst_addr_lo
, src_addr
,
301 dmae
->comp_addr_hi
, dmae
->comp_addr_lo
, dmae
->comp_val
);
305 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
309 while (*wb_comp
!= DMAE_COMP_VAL
) {
311 /* adjust delay for emulation/FPGA */
312 if (CHIP_REV_IS_SLOW(bp
))
318 BNX2X_ERR("dmae timeout!\n");
323 DP(BNX2X_MSG_OFF
, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
324 bp
->slowpath
->wb_data
[0], bp
->slowpath
->wb_data
[1],
325 bp
->slowpath
->wb_data
[2], bp
->slowpath
->wb_data
[3]);
327 mutex_unlock(&bp
->dmae_mutex
);
330 /* used only for slowpath so not inlined */
331 static void bnx2x_wb_wr(struct bnx2x
*bp
, int reg
, u32 val_hi
, u32 val_lo
)
335 wb_write
[0] = val_hi
;
336 wb_write
[1] = val_lo
;
337 REG_WR_DMAE(bp
, reg
, wb_write
, 2);
341 static u64
bnx2x_wb_rd(struct bnx2x
*bp
, int reg
)
345 REG_RD_DMAE(bp
, reg
, wb_data
, 2);
347 return HILO_U64(wb_data
[0], wb_data
[1]);
351 static int bnx2x_mc_assert(struct bnx2x
*bp
)
355 u32 row0
, row1
, row2
, row3
;
358 last_idx
= REG_RD8(bp
, BAR_XSTRORM_INTMEM
+
359 XSTORM_ASSERT_LIST_INDEX_OFFSET
);
361 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
363 /* print the asserts */
364 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
366 row0
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
367 XSTORM_ASSERT_LIST_OFFSET(i
));
368 row1
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
369 XSTORM_ASSERT_LIST_OFFSET(i
) + 4);
370 row2
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
371 XSTORM_ASSERT_LIST_OFFSET(i
) + 8);
372 row3
= REG_RD(bp
, BAR_XSTRORM_INTMEM
+
373 XSTORM_ASSERT_LIST_OFFSET(i
) + 12);
375 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
376 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
377 " 0x%08x 0x%08x 0x%08x\n",
378 i
, row3
, row2
, row1
, row0
);
386 last_idx
= REG_RD8(bp
, BAR_TSTRORM_INTMEM
+
387 TSTORM_ASSERT_LIST_INDEX_OFFSET
);
389 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
391 /* print the asserts */
392 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
394 row0
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
395 TSTORM_ASSERT_LIST_OFFSET(i
));
396 row1
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
397 TSTORM_ASSERT_LIST_OFFSET(i
) + 4);
398 row2
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
399 TSTORM_ASSERT_LIST_OFFSET(i
) + 8);
400 row3
= REG_RD(bp
, BAR_TSTRORM_INTMEM
+
401 TSTORM_ASSERT_LIST_OFFSET(i
) + 12);
403 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
404 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
405 " 0x%08x 0x%08x 0x%08x\n",
406 i
, row3
, row2
, row1
, row0
);
414 last_idx
= REG_RD8(bp
, BAR_CSTRORM_INTMEM
+
415 CSTORM_ASSERT_LIST_INDEX_OFFSET
);
417 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
419 /* print the asserts */
420 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
422 row0
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
423 CSTORM_ASSERT_LIST_OFFSET(i
));
424 row1
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
425 CSTORM_ASSERT_LIST_OFFSET(i
) + 4);
426 row2
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
427 CSTORM_ASSERT_LIST_OFFSET(i
) + 8);
428 row3
= REG_RD(bp
, BAR_CSTRORM_INTMEM
+
429 CSTORM_ASSERT_LIST_OFFSET(i
) + 12);
431 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
432 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
433 " 0x%08x 0x%08x 0x%08x\n",
434 i
, row3
, row2
, row1
, row0
);
442 last_idx
= REG_RD8(bp
, BAR_USTRORM_INTMEM
+
443 USTORM_ASSERT_LIST_INDEX_OFFSET
);
445 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx
);
447 /* print the asserts */
448 for (i
= 0; i
< STROM_ASSERT_ARRAY_SIZE
; i
++) {
450 row0
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
451 USTORM_ASSERT_LIST_OFFSET(i
));
452 row1
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
453 USTORM_ASSERT_LIST_OFFSET(i
) + 4);
454 row2
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
455 USTORM_ASSERT_LIST_OFFSET(i
) + 8);
456 row3
= REG_RD(bp
, BAR_USTRORM_INTMEM
+
457 USTORM_ASSERT_LIST_OFFSET(i
) + 12);
459 if (row0
!= COMMON_ASM_INVALID_ASSERT_OPCODE
) {
460 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
461 " 0x%08x 0x%08x 0x%08x\n",
462 i
, row3
, row2
, row1
, row0
);
472 static void bnx2x_fw_dump(struct bnx2x
*bp
)
478 mark
= REG_RD(bp
, MCP_REG_MCPR_SCRATCH
+ 0xf104);
479 mark
= ((mark
+ 0x3) & ~0x3);
480 printk(KERN_ERR PFX
"begin fw dump (mark 0x%x)\n" KERN_ERR
, mark
);
482 for (offset
= mark
- 0x08000000; offset
<= 0xF900; offset
+= 0x8*4) {
483 for (word
= 0; word
< 8; word
++)
484 data
[word
] = htonl(REG_RD(bp
, MCP_REG_MCPR_SCRATCH
+
487 printk(KERN_CONT
"%s", (char *)data
);
489 for (offset
= 0xF108; offset
<= mark
- 0x08000000; offset
+= 0x8*4) {
490 for (word
= 0; word
< 8; word
++)
491 data
[word
] = htonl(REG_RD(bp
, MCP_REG_MCPR_SCRATCH
+
494 printk(KERN_CONT
"%s", (char *)data
);
496 printk("\n" KERN_ERR PFX
"end of fw dump\n");
499 static void bnx2x_panic_dump(struct bnx2x
*bp
)
504 BNX2X_ERR("begin crash dump -----------------\n");
506 for_each_queue(bp
, i
) {
507 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
508 struct eth_tx_db_data
*hw_prods
= fp
->hw_tx_prods
;
510 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
511 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
512 i
, fp
->tx_pkt_prod
, fp
->tx_pkt_cons
, fp
->tx_bd_prod
,
513 fp
->tx_bd_cons
, le16_to_cpu(*fp
->tx_cons_sb
));
514 BNX2X_ERR(" rx_comp_prod(%x) rx_comp_cons(%x)"
515 " *rx_cons_sb(%x) *rx_bd_cons_sb(%x)"
516 " rx_sge_prod(%x) last_max_sge(%x)\n",
517 fp
->rx_comp_prod
, fp
->rx_comp_cons
,
518 le16_to_cpu(*fp
->rx_cons_sb
),
519 le16_to_cpu(*fp
->rx_bd_cons_sb
),
520 fp
->rx_sge_prod
, fp
->last_max_sge
);
521 BNX2X_ERR(" fp_c_idx(%x) fp_u_idx(%x)"
522 " bd data(%x,%x) rx_alloc_failed(%lx)\n",
523 fp
->fp_c_idx
, fp
->fp_u_idx
, hw_prods
->packets_prod
,
524 hw_prods
->bds_prod
, fp
->rx_alloc_failed
);
526 start
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) - 10);
527 end
= TX_BD(le16_to_cpu(*fp
->tx_cons_sb
) + 245);
528 for (j
= start
; j
< end
; j
++) {
529 struct sw_tx_bd
*sw_bd
= &fp
->tx_buf_ring
[j
];
531 BNX2X_ERR("packet[%x]=[%p,%x]\n", j
,
532 sw_bd
->skb
, sw_bd
->first_bd
);
535 start
= TX_BD(fp
->tx_bd_cons
- 10);
536 end
= TX_BD(fp
->tx_bd_cons
+ 254);
537 for (j
= start
; j
< end
; j
++) {
538 u32
*tx_bd
= (u32
*)&fp
->tx_desc_ring
[j
];
540 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
541 j
, tx_bd
[0], tx_bd
[1], tx_bd
[2], tx_bd
[3]);
544 start
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) - 10);
545 end
= RX_BD(le16_to_cpu(*fp
->rx_cons_sb
) + 503);
546 for (j
= start
; j
< end
; j
++) {
547 u32
*rx_bd
= (u32
*)&fp
->rx_desc_ring
[j
];
548 struct sw_rx_bd
*sw_bd
= &fp
->rx_buf_ring
[j
];
550 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
551 j
, rx_bd
[1], rx_bd
[0], sw_bd
->skb
);
555 end
= RX_SGE_CNT
*NUM_RX_SGE_PAGES
;
556 for (j
= start
; j
< end
; j
++) {
557 u32
*rx_sge
= (u32
*)&fp
->rx_sge_ring
[j
];
558 struct sw_rx_page
*sw_page
= &fp
->rx_page_ring
[j
];
560 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
561 j
, rx_sge
[1], rx_sge
[0], sw_page
->page
);
564 start
= RCQ_BD(fp
->rx_comp_cons
- 10);
565 end
= RCQ_BD(fp
->rx_comp_cons
+ 503);
566 for (j
= start
; j
< end
; j
++) {
567 u32
*cqe
= (u32
*)&fp
->rx_comp_ring
[j
];
569 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
570 j
, cqe
[0], cqe
[1], cqe
[2], cqe
[3]);
574 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
575 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
576 " spq_prod_idx(%u)\n",
577 bp
->def_c_idx
, bp
->def_u_idx
, bp
->def_x_idx
, bp
->def_t_idx
,
578 bp
->def_att_idx
, bp
->attn_state
, bp
->spq_prod_idx
);
582 BNX2X_ERR("end crash dump -----------------\n");
584 bp
->stats_state
= STATS_STATE_DISABLED
;
585 DP(BNX2X_MSG_STATS
, "stats_state - DISABLED\n");
588 static void bnx2x_int_enable(struct bnx2x
*bp
)
590 int port
= BP_PORT(bp
);
591 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
592 u32 val
= REG_RD(bp
, addr
);
593 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
596 val
&= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0
;
597 val
|= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
598 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
600 val
|= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
601 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
602 HC_CONFIG_0_REG_INT_LINE_EN_0
|
603 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
605 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
606 val
, port
, addr
, msix
);
608 REG_WR(bp
, addr
, val
);
610 val
&= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
;
613 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
614 val
, port
, addr
, msix
);
616 REG_WR(bp
, addr
, val
);
618 if (CHIP_IS_E1H(bp
)) {
619 /* init leading/trailing edge */
621 val
= (0xfe0f | (1 << (BP_E1HVN(bp
) + 4)));
623 /* enable nig attention */
628 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
629 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
633 static void bnx2x_int_disable(struct bnx2x
*bp
)
635 int port
= BP_PORT(bp
);
636 u32 addr
= port
? HC_REG_CONFIG_1
: HC_REG_CONFIG_0
;
637 u32 val
= REG_RD(bp
, addr
);
639 val
&= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0
|
640 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0
|
641 HC_CONFIG_0_REG_INT_LINE_EN_0
|
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0
);
644 DP(NETIF_MSG_INTR
, "write %x to HC %d (addr 0x%x)\n",
647 REG_WR(bp
, addr
, val
);
648 if (REG_RD(bp
, addr
) != val
)
649 BNX2X_ERR("BUG! proper val not read from IGU!\n");
652 static void bnx2x_int_disable_sync(struct bnx2x
*bp
)
654 int msix
= (bp
->flags
& USING_MSIX_FLAG
) ? 1 : 0;
657 /* disable interrupt handling */
658 atomic_inc(&bp
->intr_sem
);
659 /* prevent the HW from sending interrupts */
660 bnx2x_int_disable(bp
);
662 /* make sure all ISRs are done */
664 for_each_queue(bp
, i
)
665 synchronize_irq(bp
->msix_table
[i
].vector
);
667 /* one more for the Slow Path IRQ */
668 synchronize_irq(bp
->msix_table
[i
].vector
);
670 synchronize_irq(bp
->pdev
->irq
);
672 /* make sure sp_task is not running */
673 cancel_work_sync(&bp
->sp_task
);
679 * General service functions
682 static inline void bnx2x_ack_sb(struct bnx2x
*bp
, u8 sb_id
,
683 u8 storm
, u16 index
, u8 op
, u8 update
)
685 u32 igu_addr
= (IGU_ADDR_INT_ACK
+ IGU_FUNC_BASE
* BP_FUNC(bp
)) * 8;
686 struct igu_ack_register igu_ack
;
688 igu_ack
.status_block_index
= index
;
689 igu_ack
.sb_id_and_flags
=
690 ((sb_id
<< IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT
) |
691 (storm
<< IGU_ACK_REGISTER_STORM_ID_SHIFT
) |
692 (update
<< IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT
) |
693 (op
<< IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT
));
695 DP(BNX2X_MSG_OFF
, "write 0x%08x to IGU addr 0x%x\n",
696 (*(u32
*)&igu_ack
), BAR_IGU_INTMEM
+ igu_addr
);
697 REG_WR(bp
, BAR_IGU_INTMEM
+ igu_addr
, (*(u32
*)&igu_ack
));
700 static inline u16
bnx2x_update_fpsb_idx(struct bnx2x_fastpath
*fp
)
702 struct host_status_block
*fpsb
= fp
->status_blk
;
705 barrier(); /* status block is written to by the chip */
706 if (fp
->fp_c_idx
!= fpsb
->c_status_block
.status_block_index
) {
707 fp
->fp_c_idx
= fpsb
->c_status_block
.status_block_index
;
710 if (fp
->fp_u_idx
!= fpsb
->u_status_block
.status_block_index
) {
711 fp
->fp_u_idx
= fpsb
->u_status_block
.status_block_index
;
717 static inline int bnx2x_has_work(struct bnx2x_fastpath
*fp
)
719 u16 rx_cons_sb
= le16_to_cpu(*fp
->rx_cons_sb
);
721 if ((rx_cons_sb
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
724 if ((fp
->rx_comp_cons
!= rx_cons_sb
) ||
725 (fp
->tx_pkt_prod
!= le16_to_cpu(*fp
->tx_cons_sb
)) ||
726 (fp
->tx_pkt_prod
!= fp
->tx_pkt_cons
))
732 static u16
bnx2x_ack_int(struct bnx2x
*bp
)
734 u32 igu_addr
= (IGU_ADDR_SIMD_MASK
+ IGU_FUNC_BASE
* BP_FUNC(bp
)) * 8;
735 u32 result
= REG_RD(bp
, BAR_IGU_INTMEM
+ igu_addr
);
737 DP(BNX2X_MSG_OFF
, "read 0x%08x from IGU addr 0x%x\n",
738 result
, BAR_IGU_INTMEM
+ igu_addr
);
741 #warning IGU_DEBUG active
743 BNX2X_ERR("read %x from IGU\n", result
);
744 REG_WR(bp
, TM_REG_TIMER_SOFT_RST
, 0);
752 * fast path service functions
755 /* free skb in the packet ring at pos idx
756 * return idx of last bd freed
758 static u16
bnx2x_free_tx_pkt(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
761 struct sw_tx_bd
*tx_buf
= &fp
->tx_buf_ring
[idx
];
762 struct eth_tx_bd
*tx_bd
;
763 struct sk_buff
*skb
= tx_buf
->skb
;
764 u16 bd_idx
= TX_BD(tx_buf
->first_bd
), new_cons
;
767 DP(BNX2X_MSG_OFF
, "pkt_idx %d buff @(%p)->skb %p\n",
771 DP(BNX2X_MSG_OFF
, "free bd_idx %d\n", bd_idx
);
772 tx_bd
= &fp
->tx_desc_ring
[bd_idx
];
773 pci_unmap_single(bp
->pdev
, BD_UNMAP_ADDR(tx_bd
),
774 BD_UNMAP_LEN(tx_bd
), PCI_DMA_TODEVICE
);
776 nbd
= le16_to_cpu(tx_bd
->nbd
) - 1;
777 new_cons
= nbd
+ tx_buf
->first_bd
;
778 #ifdef BNX2X_STOP_ON_ERROR
779 if (nbd
> (MAX_SKB_FRAGS
+ 2)) {
780 BNX2X_ERR("BAD nbd!\n");
785 /* Skip a parse bd and the TSO split header bd
786 since they have no mapping */
788 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
790 if (tx_bd
->bd_flags
.as_bitfield
& (ETH_TX_BD_FLAGS_IP_CSUM
|
791 ETH_TX_BD_FLAGS_TCP_CSUM
|
792 ETH_TX_BD_FLAGS_SW_LSO
)) {
794 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
795 tx_bd
= &fp
->tx_desc_ring
[bd_idx
];
796 /* is this a TSO split header bd? */
797 if (tx_bd
->bd_flags
.as_bitfield
& ETH_TX_BD_FLAGS_SW_LSO
) {
799 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
806 DP(BNX2X_MSG_OFF
, "free frag bd_idx %d\n", bd_idx
);
807 tx_bd
= &fp
->tx_desc_ring
[bd_idx
];
808 pci_unmap_page(bp
->pdev
, BD_UNMAP_ADDR(tx_bd
),
809 BD_UNMAP_LEN(tx_bd
), PCI_DMA_TODEVICE
);
811 bd_idx
= TX_BD(NEXT_TX_IDX(bd_idx
));
817 tx_buf
->first_bd
= 0;
823 static inline u16
bnx2x_tx_avail(struct bnx2x_fastpath
*fp
)
829 barrier(); /* Tell compiler that prod and cons can change */
830 prod
= fp
->tx_bd_prod
;
831 cons
= fp
->tx_bd_cons
;
833 /* NUM_TX_RINGS = number of "next-page" entries
834 It will be used as a threshold */
835 used
= SUB_S16(prod
, cons
) + (s16
)NUM_TX_RINGS
;
837 #ifdef BNX2X_STOP_ON_ERROR
839 WARN_ON(used
> fp
->bp
->tx_ring_size
);
840 WARN_ON((fp
->bp
->tx_ring_size
- used
) > MAX_TX_AVAIL
);
843 return (s16
)(fp
->bp
->tx_ring_size
) - used
;
846 static void bnx2x_tx_int(struct bnx2x_fastpath
*fp
, int work
)
848 struct bnx2x
*bp
= fp
->bp
;
849 u16 hw_cons
, sw_cons
, bd_cons
= fp
->tx_bd_cons
;
852 #ifdef BNX2X_STOP_ON_ERROR
853 if (unlikely(bp
->panic
))
857 hw_cons
= le16_to_cpu(*fp
->tx_cons_sb
);
858 sw_cons
= fp
->tx_pkt_cons
;
860 while (sw_cons
!= hw_cons
) {
863 pkt_cons
= TX_BD(sw_cons
);
865 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
867 DP(NETIF_MSG_TX_DONE
, "hw_cons %u sw_cons %u pkt_cons %u\n",
868 hw_cons
, sw_cons
, pkt_cons
);
870 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
872 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
875 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, pkt_cons
);
883 fp
->tx_pkt_cons
= sw_cons
;
884 fp
->tx_bd_cons
= bd_cons
;
886 /* Need to make the tx_cons update visible to start_xmit()
887 * before checking for netif_queue_stopped(). Without the
888 * memory barrier, there is a small possibility that start_xmit()
889 * will miss it and cause the queue to be stopped forever.
893 /* TBD need a thresh? */
894 if (unlikely(netif_queue_stopped(bp
->dev
))) {
896 netif_tx_lock(bp
->dev
);
898 if (netif_queue_stopped(bp
->dev
) &&
899 (bnx2x_tx_avail(fp
) >= MAX_SKB_FRAGS
+ 3))
900 netif_wake_queue(bp
->dev
);
902 netif_tx_unlock(bp
->dev
);
906 static void bnx2x_sp_event(struct bnx2x_fastpath
*fp
,
907 union eth_rx_cqe
*rr_cqe
)
909 struct bnx2x
*bp
= fp
->bp
;
910 int cid
= SW_CID(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
911 int command
= CQE_CMD(rr_cqe
->ramrod_cqe
.conn_and_cmd_data
);
914 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
915 FP_IDX(fp
), cid
, command
, bp
->state
,
916 rr_cqe
->ramrod_cqe
.ramrod_type
);
921 switch (command
| fp
->state
) {
922 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP
|
923 BNX2X_FP_STATE_OPENING
):
924 DP(NETIF_MSG_IFUP
, "got MULTI[%d] setup ramrod\n",
926 fp
->state
= BNX2X_FP_STATE_OPEN
;
929 case (RAMROD_CMD_ID_ETH_HALT
| BNX2X_FP_STATE_HALTING
):
930 DP(NETIF_MSG_IFDOWN
, "got MULTI[%d] halt ramrod\n",
932 fp
->state
= BNX2X_FP_STATE_HALTED
;
936 BNX2X_ERR("unexpected MC reply (%d) "
937 "fp->state is %x\n", command
, fp
->state
);
940 mb(); /* force bnx2x_wait_ramrod() to see the change */
944 switch (command
| bp
->state
) {
945 case (RAMROD_CMD_ID_ETH_PORT_SETUP
| BNX2X_STATE_OPENING_WAIT4_PORT
):
946 DP(NETIF_MSG_IFUP
, "got setup ramrod\n");
947 bp
->state
= BNX2X_STATE_OPEN
;
950 case (RAMROD_CMD_ID_ETH_HALT
| BNX2X_STATE_CLOSING_WAIT4_HALT
):
951 DP(NETIF_MSG_IFDOWN
, "got halt ramrod\n");
952 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_DELETE
;
953 fp
->state
= BNX2X_FP_STATE_HALTED
;
956 case (RAMROD_CMD_ID_ETH_CFC_DEL
| BNX2X_STATE_CLOSING_WAIT4_HALT
):
957 DP(NETIF_MSG_IFDOWN
, "got delete ramrod for MULTI[%d]\n", cid
);
958 bnx2x_fp(bp
, cid
, state
) = BNX2X_FP_STATE_CLOSED
;
961 case (RAMROD_CMD_ID_ETH_SET_MAC
| BNX2X_STATE_OPEN
):
962 case (RAMROD_CMD_ID_ETH_SET_MAC
| BNX2X_STATE_DIAG
):
963 DP(NETIF_MSG_IFUP
, "got set mac ramrod\n");
964 bp
->set_mac_pending
= 0;
967 case (RAMROD_CMD_ID_ETH_SET_MAC
| BNX2X_STATE_CLOSING_WAIT4_HALT
):
968 DP(NETIF_MSG_IFDOWN
, "got (un)set mac ramrod\n");
972 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
976 mb(); /* force bnx2x_wait_ramrod() to see the change */
979 static inline void bnx2x_free_rx_sge(struct bnx2x
*bp
,
980 struct bnx2x_fastpath
*fp
, u16 index
)
982 struct sw_rx_page
*sw_buf
= &fp
->rx_page_ring
[index
];
983 struct page
*page
= sw_buf
->page
;
984 struct eth_rx_sge
*sge
= &fp
->rx_sge_ring
[index
];
986 /* Skip "next page" elements */
990 pci_unmap_page(bp
->pdev
, pci_unmap_addr(sw_buf
, mapping
),
991 BCM_PAGE_SIZE
*PAGES_PER_SGE
, PCI_DMA_FROMDEVICE
);
992 __free_pages(page
, PAGES_PER_SGE_SHIFT
);
/* Free the first 'last' SGE ring entries (e.g. on allocation rollback). */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
1008 static inline int bnx2x_alloc_rx_sge(struct bnx2x
*bp
,
1009 struct bnx2x_fastpath
*fp
, u16 index
)
1011 struct page
*page
= alloc_pages(GFP_ATOMIC
, PAGES_PER_SGE_SHIFT
);
1012 struct sw_rx_page
*sw_buf
= &fp
->rx_page_ring
[index
];
1013 struct eth_rx_sge
*sge
= &fp
->rx_sge_ring
[index
];
1016 if (unlikely(page
== NULL
))
1019 mapping
= pci_map_page(bp
->pdev
, page
, 0, BCM_PAGE_SIZE
*PAGES_PER_SGE
,
1020 PCI_DMA_FROMDEVICE
);
1021 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
1022 __free_pages(page
, PAGES_PER_SGE_SHIFT
);
1026 sw_buf
->page
= page
;
1027 pci_unmap_addr_set(sw_buf
, mapping
, mapping
);
1029 sge
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1030 sge
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1035 static inline int bnx2x_alloc_rx_skb(struct bnx2x
*bp
,
1036 struct bnx2x_fastpath
*fp
, u16 index
)
1038 struct sk_buff
*skb
;
1039 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[index
];
1040 struct eth_rx_bd
*rx_bd
= &fp
->rx_desc_ring
[index
];
1043 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
1044 if (unlikely(skb
== NULL
))
1047 mapping
= pci_map_single(bp
->pdev
, skb
->data
, bp
->rx_buf_use_size
,
1048 PCI_DMA_FROMDEVICE
);
1049 if (unlikely(dma_mapping_error(&bp
->pdev
->dev
, mapping
))) {
1055 pci_unmap_addr_set(rx_buf
, mapping
, mapping
);
1057 rx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1058 rx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1063 /* note that we are not allocating a new skb,
1064 * we are just moving one from cons to prod
1065 * we are not creating a new mapping,
1066 * so there is no need to check for dma_mapping_error().
1068 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath
*fp
,
1069 struct sk_buff
*skb
, u16 cons
, u16 prod
)
1071 struct bnx2x
*bp
= fp
->bp
;
1072 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
1073 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
1074 struct eth_rx_bd
*cons_bd
= &fp
->rx_desc_ring
[cons
];
1075 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
1077 pci_dma_sync_single_for_device(bp
->pdev
,
1078 pci_unmap_addr(cons_rx_buf
, mapping
),
1079 bp
->rx_offset
+ RX_COPY_THRESH
,
1080 PCI_DMA_FROMDEVICE
);
1082 prod_rx_buf
->skb
= cons_rx_buf
->skb
;
1083 pci_unmap_addr_set(prod_rx_buf
, mapping
,
1084 pci_unmap_addr(cons_rx_buf
, mapping
));
1085 *prod_bd
= *cons_bd
;
1088 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath
*fp
,
1091 u16 last_max
= fp
->last_max_sge
;
1093 if (SUB_S16(idx
, last_max
) > 0)
1094 fp
->last_max_sge
= idx
;
1097 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath
*fp
)
1101 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
1102 int idx
= RX_SGE_CNT
* i
- 1;
1104 for (j
= 0; j
< 2; j
++) {
1105 SGE_MASK_CLEAR_BIT(fp
, idx
);
1111 static void bnx2x_update_sge_prod(struct bnx2x_fastpath
*fp
,
1112 struct eth_fast_path_rx_cqe
*fp_cqe
)
1114 struct bnx2x
*bp
= fp
->bp
;
1115 u16 sge_len
= BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe
->pkt_len
) -
1116 le16_to_cpu(fp_cqe
->len_on_bd
)) >>
1118 u16 last_max
, last_elem
, first_elem
;
1125 /* First mark all used pages */
1126 for (i
= 0; i
< sge_len
; i
++)
1127 SGE_MASK_CLEAR_BIT(fp
, RX_SGE(le16_to_cpu(fp_cqe
->sgl
[i
])));
1129 DP(NETIF_MSG_RX_STATUS
, "fp_cqe->sgl[%d] = %d\n",
1130 sge_len
- 1, le16_to_cpu(fp_cqe
->sgl
[sge_len
- 1]));
1132 /* Here we assume that the last SGE index is the biggest */
1133 prefetch((void *)(fp
->sge_mask
));
1134 bnx2x_update_last_max_sge(fp
, le16_to_cpu(fp_cqe
->sgl
[sge_len
- 1]));
1136 last_max
= RX_SGE(fp
->last_max_sge
);
1137 last_elem
= last_max
>> RX_SGE_MASK_ELEM_SHIFT
;
1138 first_elem
= RX_SGE(fp
->rx_sge_prod
) >> RX_SGE_MASK_ELEM_SHIFT
;
1140 /* If ring is not full */
1141 if (last_elem
+ 1 != first_elem
)
1144 /* Now update the prod */
1145 for (i
= first_elem
; i
!= last_elem
; i
= NEXT_SGE_MASK_ELEM(i
)) {
1146 if (likely(fp
->sge_mask
[i
]))
1149 fp
->sge_mask
[i
] = RX_SGE_MASK_ELEM_ONE_MASK
;
1150 delta
+= RX_SGE_MASK_ELEM_SZ
;
1154 fp
->rx_sge_prod
+= delta
;
1155 /* clear page-end entries */
1156 bnx2x_clear_sge_mask_next_elems(fp
);
1159 DP(NETIF_MSG_RX_STATUS
,
1160 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1161 fp
->last_max_sge
, fp
->rx_sge_prod
);
1164 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath
*fp
)
1166 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1167 memset(fp
->sge_mask
, 0xff,
1168 (NUM_RX_SGE
>> RX_SGE_MASK_ELEM_SHIFT
)*sizeof(u64
));
1170 /* Clear the two last indeces in the page to 1:
1171 these are the indeces that correspond to the "next" element,
1172 hence will never be indicated and should be removed from
1173 the calculations. */
1174 bnx2x_clear_sge_mask_next_elems(fp
);
1177 static void bnx2x_tpa_start(struct bnx2x_fastpath
*fp
, u16 queue
,
1178 struct sk_buff
*skb
, u16 cons
, u16 prod
)
1180 struct bnx2x
*bp
= fp
->bp
;
1181 struct sw_rx_bd
*cons_rx_buf
= &fp
->rx_buf_ring
[cons
];
1182 struct sw_rx_bd
*prod_rx_buf
= &fp
->rx_buf_ring
[prod
];
1183 struct eth_rx_bd
*prod_bd
= &fp
->rx_desc_ring
[prod
];
1186 /* move empty skb from pool to prod and map it */
1187 prod_rx_buf
->skb
= fp
->tpa_pool
[queue
].skb
;
1188 mapping
= pci_map_single(bp
->pdev
, fp
->tpa_pool
[queue
].skb
->data
,
1189 bp
->rx_buf_use_size
, PCI_DMA_FROMDEVICE
);
1190 pci_unmap_addr_set(prod_rx_buf
, mapping
, mapping
);
1192 /* move partial skb from cons to pool (don't unmap yet) */
1193 fp
->tpa_pool
[queue
] = *cons_rx_buf
;
1195 /* mark bin state as start - print error if current state != stop */
1196 if (fp
->tpa_state
[queue
] != BNX2X_TPA_STOP
)
1197 BNX2X_ERR("start of bin not in stop [%d]\n", queue
);
1199 fp
->tpa_state
[queue
] = BNX2X_TPA_START
;
1201 /* point prod_bd to new skb */
1202 prod_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
1203 prod_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
1205 #ifdef BNX2X_STOP_ON_ERROR
1206 fp
->tpa_queue_used
|= (1 << queue
);
1207 #ifdef __powerpc64__
1208 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%lx\n",
1210 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%llx\n",
1212 fp
->tpa_queue_used
);
1216 static int bnx2x_fill_frag_skb(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
1217 struct sk_buff
*skb
,
1218 struct eth_fast_path_rx_cqe
*fp_cqe
,
1221 struct sw_rx_page
*rx_pg
, old_rx_pg
;
1223 u16 len_on_bd
= le16_to_cpu(fp_cqe
->len_on_bd
);
1224 u32 i
, frag_len
, frag_size
, pages
;
1228 frag_size
= le16_to_cpu(fp_cqe
->pkt_len
) - len_on_bd
;
1229 pages
= BCM_PAGE_ALIGN(frag_size
) >> BCM_PAGE_SHIFT
;
1231 /* This is needed in order to enable forwarding support */
1233 skb_shinfo(skb
)->gso_size
= min((u32
)BCM_PAGE_SIZE
,
1234 max(frag_size
, (u32
)len_on_bd
));
1236 #ifdef BNX2X_STOP_ON_ERROR
1237 if (pages
> 8*PAGES_PER_SGE
) {
1238 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1240 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1241 fp_cqe
->pkt_len
, len_on_bd
);
1247 /* Run through the SGL and compose the fragmented skb */
1248 for (i
= 0, j
= 0; i
< pages
; i
+= PAGES_PER_SGE
, j
++) {
1249 u16 sge_idx
= RX_SGE(le16_to_cpu(fp_cqe
->sgl
[j
]));
1251 /* FW gives the indices of the SGE as if the ring is an array
1252 (meaning that "next" element will consume 2 indices) */
1253 frag_len
= min(frag_size
, (u32
)(BCM_PAGE_SIZE
*PAGES_PER_SGE
));
1254 rx_pg
= &fp
->rx_page_ring
[sge_idx
];
1258 /* If we fail to allocate a substitute page, we simply stop
1259 where we are and drop the whole packet */
1260 err
= bnx2x_alloc_rx_sge(bp
, fp
, sge_idx
);
1261 if (unlikely(err
)) {
1262 fp
->rx_alloc_failed
++;
1266 /* Unmap the page as we r going to pass it to the stack */
1267 pci_unmap_page(bp
->pdev
, pci_unmap_addr(&old_rx_pg
, mapping
),
1268 BCM_PAGE_SIZE
*PAGES_PER_SGE
, PCI_DMA_FROMDEVICE
);
1270 /* Add one frag and update the appropriate fields in the skb */
1271 skb_fill_page_desc(skb
, j
, old_rx_pg
.page
, 0, frag_len
);
1273 skb
->data_len
+= frag_len
;
1274 skb
->truesize
+= frag_len
;
1275 skb
->len
+= frag_len
;
1277 frag_size
-= frag_len
;
1283 static void bnx2x_tpa_stop(struct bnx2x
*bp
, struct bnx2x_fastpath
*fp
,
1284 u16 queue
, int pad
, int len
, union eth_rx_cqe
*cqe
,
1287 struct sw_rx_bd
*rx_buf
= &fp
->tpa_pool
[queue
];
1288 struct sk_buff
*skb
= rx_buf
->skb
;
1290 struct sk_buff
*new_skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
1292 /* Unmap skb in the pool anyway, as we are going to change
1293 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1295 pci_unmap_single(bp
->pdev
, pci_unmap_addr(rx_buf
, mapping
),
1296 bp
->rx_buf_use_size
, PCI_DMA_FROMDEVICE
);
1298 /* if alloc failed drop the packet and keep the buffer in the bin */
1299 if (likely(new_skb
)) {
1302 prefetch(((char *)(skb
)) + 128);
1304 /* else fix ip xsum and give it to the stack */
1305 /* (no need to map the new skb) */
1306 #ifdef BNX2X_STOP_ON_ERROR
1307 if (pad
+ len
> bp
->rx_buf_size
) {
1308 BNX2X_ERR("skb_put is about to fail... "
1309 "pad %d len %d rx_buf_size %d\n",
1310 pad
, len
, bp
->rx_buf_size
);
1316 skb_reserve(skb
, pad
);
1319 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
1320 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1325 iph
= (struct iphdr
*)skb
->data
;
1327 iph
->check
= ip_fast_csum((u8
*)iph
, iph
->ihl
);
1330 if (!bnx2x_fill_frag_skb(bp
, fp
, skb
,
1331 &cqe
->fast_path_cqe
, cqe_idx
)) {
1333 if ((bp
->vlgrp
!= NULL
) &&
1334 (le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
) &
1335 PARSING_FLAGS_VLAN
))
1336 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
,
1337 le16_to_cpu(cqe
->fast_path_cqe
.
1341 netif_receive_skb(skb
);
1343 DP(NETIF_MSG_RX_STATUS
, "Failed to allocate new pages"
1344 " - dropping packet!\n");
1348 bp
->dev
->last_rx
= jiffies
;
1350 /* put new skb in bin */
1351 fp
->tpa_pool
[queue
].skb
= new_skb
;
1354 DP(NETIF_MSG_RX_STATUS
,
1355 "Failed to allocate new skb - dropping packet!\n");
1356 fp
->rx_alloc_failed
++;
1359 fp
->tpa_state
[queue
] = BNX2X_TPA_STOP
;
1362 static inline void bnx2x_update_rx_prod(struct bnx2x
*bp
,
1363 struct bnx2x_fastpath
*fp
,
1364 u16 bd_prod
, u16 rx_comp_prod
,
1367 struct tstorm_eth_rx_producers rx_prods
= {0};
1370 /* Update producers */
1371 rx_prods
.bd_prod
= bd_prod
;
1372 rx_prods
.cqe_prod
= rx_comp_prod
;
1373 rx_prods
.sge_prod
= rx_sge_prod
;
1375 for (i
= 0; i
< sizeof(struct tstorm_eth_rx_producers
)/4; i
++)
1376 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
1377 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp
), FP_CL_ID(fp
)) + i
*4,
1378 ((u32
*)&rx_prods
)[i
]);
1380 DP(NETIF_MSG_RX_STATUS
,
1381 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1382 bd_prod
, rx_comp_prod
, rx_sge_prod
);
1385 static int bnx2x_rx_int(struct bnx2x_fastpath
*fp
, int budget
)
1387 struct bnx2x
*bp
= fp
->bp
;
1388 u16 bd_cons
, bd_prod
, bd_prod_fw
, comp_ring_cons
;
1389 u16 hw_comp_cons
, sw_comp_cons
, sw_comp_prod
;
1393 #ifdef BNX2X_STOP_ON_ERROR
1394 if (unlikely(bp
->panic
))
1398 /* CQ "next element" is of the size of the regular element,
1399 that's why it's ok here */
1400 hw_comp_cons
= le16_to_cpu(*fp
->rx_cons_sb
);
1401 if ((hw_comp_cons
& MAX_RCQ_DESC_CNT
) == MAX_RCQ_DESC_CNT
)
1404 bd_cons
= fp
->rx_bd_cons
;
1405 bd_prod
= fp
->rx_bd_prod
;
1406 bd_prod_fw
= bd_prod
;
1407 sw_comp_cons
= fp
->rx_comp_cons
;
1408 sw_comp_prod
= fp
->rx_comp_prod
;
1410 /* Memory barrier necessary as speculative reads of the rx
1411 * buffer can be ahead of the index in the status block
1415 DP(NETIF_MSG_RX_STATUS
,
1416 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1417 FP_IDX(fp
), hw_comp_cons
, sw_comp_cons
);
1419 while (sw_comp_cons
!= hw_comp_cons
) {
1420 struct sw_rx_bd
*rx_buf
= NULL
;
1421 struct sk_buff
*skb
;
1422 union eth_rx_cqe
*cqe
;
1426 comp_ring_cons
= RCQ_BD(sw_comp_cons
);
1427 bd_prod
= RX_BD(bd_prod
);
1428 bd_cons
= RX_BD(bd_cons
);
1430 cqe
= &fp
->rx_comp_ring
[comp_ring_cons
];
1431 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
1433 DP(NETIF_MSG_RX_STATUS
, "CQE type %x err %x status %x"
1434 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags
),
1435 cqe_fp_flags
, cqe
->fast_path_cqe
.status_flags
,
1436 cqe
->fast_path_cqe
.rss_hash_result
,
1437 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
),
1438 le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
));
1440 /* is this a slowpath msg? */
1441 if (unlikely(CQE_TYPE(cqe_fp_flags
))) {
1442 bnx2x_sp_event(fp
, cqe
);
1445 /* this is an rx packet */
1447 rx_buf
= &fp
->rx_buf_ring
[bd_cons
];
1449 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
1450 pad
= cqe
->fast_path_cqe
.placement_offset
;
1452 /* If CQE is marked both TPA_START and TPA_END
1453 it is a non-TPA CQE */
1454 if ((!fp
->disable_tpa
) &&
1455 (TPA_TYPE(cqe_fp_flags
) !=
1456 (TPA_TYPE_START
| TPA_TYPE_END
))) {
1457 queue
= cqe
->fast_path_cqe
.queue_index
;
1459 if (TPA_TYPE(cqe_fp_flags
) == TPA_TYPE_START
) {
1460 DP(NETIF_MSG_RX_STATUS
,
1461 "calling tpa_start on queue %d\n",
1464 bnx2x_tpa_start(fp
, queue
, skb
,
1469 if (TPA_TYPE(cqe_fp_flags
) == TPA_TYPE_END
) {
1470 DP(NETIF_MSG_RX_STATUS
,
1471 "calling tpa_stop on queue %d\n",
1474 if (!BNX2X_RX_SUM_FIX(cqe
))
1475 BNX2X_ERR("STOP on none TCP "
1478 /* This is a size of the linear data
1480 len
= le16_to_cpu(cqe
->fast_path_cqe
.
1482 bnx2x_tpa_stop(bp
, fp
, queue
, pad
,
1483 len
, cqe
, comp_ring_cons
);
1484 #ifdef BNX2X_STOP_ON_ERROR
1489 bnx2x_update_sge_prod(fp
,
1490 &cqe
->fast_path_cqe
);
1495 pci_dma_sync_single_for_device(bp
->pdev
,
1496 pci_unmap_addr(rx_buf
, mapping
),
1497 pad
+ RX_COPY_THRESH
,
1498 PCI_DMA_FROMDEVICE
);
1500 prefetch(((char *)(skb
)) + 128);
1502 /* is this an error packet? */
1503 if (unlikely(cqe_fp_flags
& ETH_RX_ERROR_FALGS
)) {
1504 DP(NETIF_MSG_RX_ERR
,
1505 "ERROR flags %x rx packet %u\n",
1506 cqe_fp_flags
, sw_comp_cons
);
1507 /* TBD make sure MC counts this as a drop */
1511 /* Since we don't have a jumbo ring
1512 * copy small packets if mtu > 1500
1514 if ((bp
->dev
->mtu
> ETH_MAX_PACKET_SIZE
) &&
1515 (len
<= RX_COPY_THRESH
)) {
1516 struct sk_buff
*new_skb
;
1518 new_skb
= netdev_alloc_skb(bp
->dev
,
1520 if (new_skb
== NULL
) {
1521 DP(NETIF_MSG_RX_ERR
,
1522 "ERROR packet dropped "
1523 "because of alloc failure\n");
1524 fp
->rx_alloc_failed
++;
1529 skb_copy_from_linear_data_offset(skb
, pad
,
1530 new_skb
->data
+ pad
, len
);
1531 skb_reserve(new_skb
, pad
);
1532 skb_put(new_skb
, len
);
1534 bnx2x_reuse_rx_skb(fp
, skb
, bd_cons
, bd_prod
);
1538 } else if (bnx2x_alloc_rx_skb(bp
, fp
, bd_prod
) == 0) {
1539 pci_unmap_single(bp
->pdev
,
1540 pci_unmap_addr(rx_buf
, mapping
),
1541 bp
->rx_buf_use_size
,
1542 PCI_DMA_FROMDEVICE
);
1543 skb_reserve(skb
, pad
);
1547 DP(NETIF_MSG_RX_ERR
,
1548 "ERROR packet dropped because "
1549 "of alloc failure\n");
1550 fp
->rx_alloc_failed
++;
1552 bnx2x_reuse_rx_skb(fp
, skb
, bd_cons
, bd_prod
);
1556 skb
->protocol
= eth_type_trans(skb
, bp
->dev
);
1558 skb
->ip_summed
= CHECKSUM_NONE
;
1560 if (likely(BNX2X_RX_CSUM_OK(cqe
)))
1561 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1566 if ((bp
->vlgrp
!= NULL
) &&
1567 (le16_to_cpu(cqe
->fast_path_cqe
.pars_flags
.flags
) &
1568 PARSING_FLAGS_VLAN
))
1569 vlan_hwaccel_receive_skb(skb
, bp
->vlgrp
,
1570 le16_to_cpu(cqe
->fast_path_cqe
.vlan_tag
));
1573 netif_receive_skb(skb
);
1575 bp
->dev
->last_rx
= jiffies
;
1580 bd_cons
= NEXT_RX_IDX(bd_cons
);
1581 bd_prod
= NEXT_RX_IDX(bd_prod
);
1582 bd_prod_fw
= NEXT_RX_IDX(bd_prod_fw
);
1585 sw_comp_prod
= NEXT_RCQ_IDX(sw_comp_prod
);
1586 sw_comp_cons
= NEXT_RCQ_IDX(sw_comp_cons
);
1588 if (rx_pkt
== budget
)
1592 fp
->rx_bd_cons
= bd_cons
;
1593 fp
->rx_bd_prod
= bd_prod_fw
;
1594 fp
->rx_comp_cons
= sw_comp_cons
;
1595 fp
->rx_comp_prod
= sw_comp_prod
;
1597 /* Update producers */
1598 bnx2x_update_rx_prod(bp
, fp
, bd_prod_fw
, sw_comp_prod
,
1600 mmiowb(); /* keep prod updates ordered */
1602 fp
->rx_pkt
+= rx_pkt
;
1608 static irqreturn_t
bnx2x_msix_fp_int(int irq
, void *fp_cookie
)
1610 struct bnx2x_fastpath
*fp
= fp_cookie
;
1611 struct bnx2x
*bp
= fp
->bp
;
1612 struct net_device
*dev
= bp
->dev
;
1613 int index
= FP_IDX(fp
);
1615 DP(BNX2X_MSG_FP
, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1616 index
, FP_SB_ID(fp
));
1617 bnx2x_ack_sb(bp
, FP_SB_ID(fp
), USTORM_ID
, 0, IGU_INT_DISABLE
, 0);
1619 #ifdef BNX2X_STOP_ON_ERROR
1620 if (unlikely(bp
->panic
))
1624 prefetch(fp
->rx_cons_sb
);
1625 prefetch(fp
->tx_cons_sb
);
1626 prefetch(&fp
->status_blk
->c_status_block
.status_block_index
);
1627 prefetch(&fp
->status_blk
->u_status_block
.status_block_index
);
1629 netif_rx_schedule(dev
, &bnx2x_fp(bp
, index
, napi
));
1634 static irqreturn_t
bnx2x_interrupt(int irq
, void *dev_instance
)
1636 struct net_device
*dev
= dev_instance
;
1637 struct bnx2x
*bp
= netdev_priv(dev
);
1638 u16 status
= bnx2x_ack_int(bp
);
1641 /* Return here if interrupt is shared and it's not for us */
1642 if (unlikely(status
== 0)) {
1643 DP(NETIF_MSG_INTR
, "not our interrupt!\n");
1646 DP(NETIF_MSG_INTR
, "got an interrupt status %u\n", status
);
1648 #ifdef BNX2X_STOP_ON_ERROR
1649 if (unlikely(bp
->panic
))
1653 /* Return here if interrupt is disabled */
1654 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
1655 DP(NETIF_MSG_INTR
, "called but intr_sem not 0, returning\n");
1659 mask
= 0x2 << bp
->fp
[0].sb_id
;
1660 if (status
& mask
) {
1661 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
1663 prefetch(fp
->rx_cons_sb
);
1664 prefetch(fp
->tx_cons_sb
);
1665 prefetch(&fp
->status_blk
->c_status_block
.status_block_index
);
1666 prefetch(&fp
->status_blk
->u_status_block
.status_block_index
);
1668 netif_rx_schedule(dev
, &bnx2x_fp(bp
, 0, napi
));
1674 if (unlikely(status
& 0x1)) {
1675 schedule_work(&bp
->sp_task
);
1683 DP(NETIF_MSG_INTR
, "got an unknown interrupt! (status %u)\n",
1689 /* end of fast path */
1691 static void bnx2x_stats_handle(struct bnx2x
*bp
, enum bnx2x_stats_event event
);
1696 * General service functions
1699 static int bnx2x_hw_lock(struct bnx2x
*bp
, u32 resource
)
1702 u32 resource_bit
= (1 << resource
);
1703 u8 port
= BP_PORT(bp
);
1706 /* Validating that the resource is within range */
1707 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1709 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1710 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1714 /* Validating that the resource is not already taken */
1715 lock_status
= REG_RD(bp
, MISC_REG_DRIVER_CONTROL_1
+ port
*8);
1716 if (lock_status
& resource_bit
) {
1717 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1718 lock_status
, resource_bit
);
1722 /* Try for 1 second every 5ms */
1723 for (cnt
= 0; cnt
< 200; cnt
++) {
1724 /* Try to acquire the lock */
1725 REG_WR(bp
, MISC_REG_DRIVER_CONTROL_1
+ port
*8 + 4,
1727 lock_status
= REG_RD(bp
, MISC_REG_DRIVER_CONTROL_1
+ port
*8);
1728 if (lock_status
& resource_bit
)
1733 DP(NETIF_MSG_HW
, "Timeout\n");
1737 static int bnx2x_hw_unlock(struct bnx2x
*bp
, u32 resource
)
1740 u32 resource_bit
= (1 << resource
);
1741 u8 port
= BP_PORT(bp
);
1743 /* Validating that the resource is within range */
1744 if (resource
> HW_LOCK_MAX_RESOURCE_VALUE
) {
1746 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1747 resource
, HW_LOCK_MAX_RESOURCE_VALUE
);
1751 /* Validating that the resource is currently taken */
1752 lock_status
= REG_RD(bp
, MISC_REG_DRIVER_CONTROL_1
+ port
*8);
1753 if (!(lock_status
& resource_bit
)) {
1754 DP(NETIF_MSG_HW
, "lock_status 0x%x resource_bit 0x%x\n",
1755 lock_status
, resource_bit
);
1759 REG_WR(bp
, MISC_REG_DRIVER_CONTROL_1
+ port
*8, resource_bit
);
1763 /* HW Lock for shared dual port PHYs */
1764 static void bnx2x_phy_hw_lock(struct bnx2x
*bp
)
1766 u32 ext_phy_type
= XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
1768 mutex_lock(&bp
->port
.phy_mutex
);
1770 if ((ext_phy_type
== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
) ||
1771 (ext_phy_type
== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
))
1772 bnx2x_hw_lock(bp
, HW_LOCK_RESOURCE_8072_MDIO
);
1775 static void bnx2x_phy_hw_unlock(struct bnx2x
*bp
)
1777 u32 ext_phy_type
= XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
1779 if ((ext_phy_type
== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
) ||
1780 (ext_phy_type
== PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
))
1781 bnx2x_hw_unlock(bp
, HW_LOCK_RESOURCE_8072_MDIO
);
1783 mutex_unlock(&bp
->port
.phy_mutex
);
1786 int bnx2x_set_gpio(struct bnx2x
*bp
, int gpio_num
, u32 mode
)
1788 /* The GPIO should be swapped if swap register is set and active */
1789 int gpio_port
= (REG_RD(bp
, NIG_REG_PORT_SWAP
) &&
1790 REG_RD(bp
, NIG_REG_STRAP_OVERRIDE
)) ^ BP_PORT(bp
);
1791 int gpio_shift
= gpio_num
+
1792 (gpio_port
? MISC_REGISTERS_GPIO_PORT_SHIFT
: 0);
1793 u32 gpio_mask
= (1 << gpio_shift
);
1796 if (gpio_num
> MISC_REGISTERS_GPIO_3
) {
1797 BNX2X_ERR("Invalid GPIO %d\n", gpio_num
);
1801 bnx2x_hw_lock(bp
, HW_LOCK_RESOURCE_GPIO
);
1802 /* read GPIO and mask except the float bits */
1803 gpio_reg
= (REG_RD(bp
, MISC_REG_GPIO
) & MISC_REGISTERS_GPIO_FLOAT
);
1806 case MISC_REGISTERS_GPIO_OUTPUT_LOW
:
1807 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output low\n",
1808 gpio_num
, gpio_shift
);
1809 /* clear FLOAT and set CLR */
1810 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1811 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_CLR_POS
);
1814 case MISC_REGISTERS_GPIO_OUTPUT_HIGH
:
1815 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> output high\n",
1816 gpio_num
, gpio_shift
);
1817 /* clear FLOAT and set SET */
1818 gpio_reg
&= ~(gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1819 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_SET_POS
);
1822 case MISC_REGISTERS_GPIO_INPUT_HI_Z
:
1823 DP(NETIF_MSG_LINK
, "Set GPIO %d (shift %d) -> input\n",
1824 gpio_num
, gpio_shift
);
1826 gpio_reg
|= (gpio_mask
<< MISC_REGISTERS_GPIO_FLOAT_POS
);
1833 REG_WR(bp
, MISC_REG_GPIO
, gpio_reg
);
1834 bnx2x_hw_unlock(bp
, HW_LOCK_RESOURCE_GPIO
);
1839 static int bnx2x_set_spio(struct bnx2x
*bp
, int spio_num
, u32 mode
)
1841 u32 spio_mask
= (1 << spio_num
);
1844 if ((spio_num
< MISC_REGISTERS_SPIO_4
) ||
1845 (spio_num
> MISC_REGISTERS_SPIO_7
)) {
1846 BNX2X_ERR("Invalid SPIO %d\n", spio_num
);
1850 bnx2x_hw_lock(bp
, HW_LOCK_RESOURCE_SPIO
);
1851 /* read SPIO and mask except the float bits */
1852 spio_reg
= (REG_RD(bp
, MISC_REG_SPIO
) & MISC_REGISTERS_SPIO_FLOAT
);
1855 case MISC_REGISTERS_SPIO_OUTPUT_LOW
:
1856 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output low\n", spio_num
);
1857 /* clear FLOAT and set CLR */
1858 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1859 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_CLR_POS
);
1862 case MISC_REGISTERS_SPIO_OUTPUT_HIGH
:
1863 DP(NETIF_MSG_LINK
, "Set SPIO %d -> output high\n", spio_num
);
1864 /* clear FLOAT and set SET */
1865 spio_reg
&= ~(spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1866 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_SET_POS
);
1869 case MISC_REGISTERS_SPIO_INPUT_HI_Z
:
1870 DP(NETIF_MSG_LINK
, "Set SPIO %d -> input\n", spio_num
);
1872 spio_reg
|= (spio_mask
<< MISC_REGISTERS_SPIO_FLOAT_POS
);
1879 REG_WR(bp
, MISC_REG_SPIO
, spio_reg
);
1880 bnx2x_hw_unlock(bp
, HW_LOCK_RESOURCE_SPIO
);
1885 static void bnx2x_calc_fc_adv(struct bnx2x
*bp
)
1887 switch (bp
->link_vars
.ieee_fc
) {
1888 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE
:
1889 bp
->port
.advertising
&= ~(ADVERTISED_Asym_Pause
|
1892 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH
:
1893 bp
->port
.advertising
|= (ADVERTISED_Asym_Pause
|
1896 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC
:
1897 bp
->port
.advertising
|= ADVERTISED_Asym_Pause
;
1900 bp
->port
.advertising
&= ~(ADVERTISED_Asym_Pause
|
1906 static void bnx2x_link_report(struct bnx2x
*bp
)
1908 if (bp
->link_vars
.link_up
) {
1909 if (bp
->state
== BNX2X_STATE_OPEN
)
1910 netif_carrier_on(bp
->dev
);
1911 printk(KERN_INFO PFX
"%s NIC Link is Up, ", bp
->dev
->name
);
1913 printk("%d Mbps ", bp
->link_vars
.line_speed
);
1915 if (bp
->link_vars
.duplex
== DUPLEX_FULL
)
1916 printk("full duplex");
1918 printk("half duplex");
1920 if (bp
->link_vars
.flow_ctrl
!= FLOW_CTRL_NONE
) {
1921 if (bp
->link_vars
.flow_ctrl
& FLOW_CTRL_RX
) {
1922 printk(", receive ");
1923 if (bp
->link_vars
.flow_ctrl
& FLOW_CTRL_TX
)
1924 printk("& transmit ");
1926 printk(", transmit ");
1928 printk("flow control ON");
1932 } else { /* link_down */
1933 netif_carrier_off(bp
->dev
);
1934 printk(KERN_ERR PFX
"%s NIC Link is Down\n", bp
->dev
->name
);
1938 static u8
bnx2x_initial_phy_init(struct bnx2x
*bp
)
1940 if (!BP_NOMCP(bp
)) {
1943 /* Initialize link parameters structure variables */
1944 bp
->link_params
.mtu
= bp
->dev
->mtu
;
1946 bnx2x_phy_hw_lock(bp
);
1947 rc
= bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
1948 bnx2x_phy_hw_unlock(bp
);
1950 if (bp
->link_vars
.link_up
)
1951 bnx2x_link_report(bp
);
1953 bnx2x_calc_fc_adv(bp
);
1957 BNX2X_ERR("Bootcode is missing -not initializing link\n");
1961 static void bnx2x_link_set(struct bnx2x
*bp
)
1963 if (!BP_NOMCP(bp
)) {
1964 bnx2x_phy_hw_lock(bp
);
1965 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
1966 bnx2x_phy_hw_unlock(bp
);
1968 bnx2x_calc_fc_adv(bp
);
1970 BNX2X_ERR("Bootcode is missing -not setting link\n");
1973 static void bnx2x__link_reset(struct bnx2x
*bp
)
1975 if (!BP_NOMCP(bp
)) {
1976 bnx2x_phy_hw_lock(bp
);
1977 bnx2x_link_reset(&bp
->link_params
, &bp
->link_vars
);
1978 bnx2x_phy_hw_unlock(bp
);
1980 BNX2X_ERR("Bootcode is missing -not resetting link\n");
1983 static u8
bnx2x_link_test(struct bnx2x
*bp
)
1987 bnx2x_phy_hw_lock(bp
);
1988 rc
= bnx2x_test_link(&bp
->link_params
, &bp
->link_vars
);
1989 bnx2x_phy_hw_unlock(bp
);
1994 /* Calculates the sum of vn_min_rates.
1995 It's needed for further normalizing of the min_rates.
2000 0 - if all the min_rates are 0.
2001 In the later case fainess algorithm should be deactivated.
2002 If not all min_rates are zero then those that are zeroes will
2005 static u32
bnx2x_calc_vn_wsum(struct bnx2x
*bp
)
2007 int i
, port
= BP_PORT(bp
);
2011 for (i
= 0; i
< E1HVN_MAX
; i
++) {
2013 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[2*i
+ port
].config
);
2014 u32 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
2015 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
2016 if (!(vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
)) {
2017 /* If min rate is zero - set it to 1 */
2019 vn_min_rate
= DEF_MIN_RATE
;
2023 wsum
+= vn_min_rate
;
2027 /* ... only if all min rates are zeros - disable FAIRNESS */
2034 static void bnx2x_init_port_minmax(struct bnx2x
*bp
,
2037 struct cmng_struct_per_port
*m_cmng_port
)
2039 u32 r_param
= port_rate
/ 8;
2040 int port
= BP_PORT(bp
);
2043 memset(m_cmng_port
, 0, sizeof(struct cmng_struct_per_port
));
2045 /* Enable minmax only if we are in e1hmf mode */
2047 u32 fair_periodic_timeout_usec
;
2050 /* Enable rate shaping and fairness */
2051 m_cmng_port
->flags
.cmng_vn_enable
= 1;
2052 m_cmng_port
->flags
.fairness_enable
= en_fness
? 1 : 0;
2053 m_cmng_port
->flags
.rate_shaping_enable
= 1;
2056 DP(NETIF_MSG_IFUP
, "All MIN values are zeroes"
2057 " fairness will be disabled\n");
2059 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2060 m_cmng_port
->rs_vars
.rs_periodic_timeout
=
2061 RS_PERIODIC_TIMEOUT_USEC
/ 4;
2063 /* this is the threshold below which no timer arming will occur
2064 1.25 coefficient is for the threshold to be a little bigger
2065 than the real time, to compensate for timer in-accuracy */
2066 m_cmng_port
->rs_vars
.rs_threshold
=
2067 (RS_PERIODIC_TIMEOUT_USEC
* r_param
* 5) / 4;
2069 /* resolution of fairness timer */
2070 fair_periodic_timeout_usec
= QM_ARB_BYTES
/ r_param
;
2071 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2072 t_fair
= T_FAIR_COEF
/ port_rate
;
2074 /* this is the threshold below which we won't arm
2075 the timer anymore */
2076 m_cmng_port
->fair_vars
.fair_threshold
= QM_ARB_BYTES
;
2078 /* we multiply by 1e3/8 to get bytes/msec.
2079 We don't want the credits to pass a credit
2080 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2081 m_cmng_port
->fair_vars
.upper_bound
=
2082 r_param
* t_fair
* FAIR_MEM
;
2083 /* since each tick is 4 usec */
2084 m_cmng_port
->fair_vars
.fairness_timeout
=
2085 fair_periodic_timeout_usec
/ 4;
2088 /* Disable rate shaping and fairness */
2089 m_cmng_port
->flags
.cmng_vn_enable
= 0;
2090 m_cmng_port
->flags
.fairness_enable
= 0;
2091 m_cmng_port
->flags
.rate_shaping_enable
= 0;
2094 "Single function mode minmax will be disabled\n");
2097 /* Store it to internal memory */
2098 for (i
= 0; i
< sizeof(struct cmng_struct_per_port
) / 4; i
++)
2099 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2100 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port
) + i
* 4,
2101 ((u32
*)(m_cmng_port
))[i
]);
2104 static void bnx2x_init_vn_minmax(struct bnx2x
*bp
, int func
,
2105 u32 wsum
, u16 port_rate
,
2106 struct cmng_struct_per_port
*m_cmng_port
)
2108 struct rate_shaping_vars_per_vn m_rs_vn
;
2109 struct fairness_vars_per_vn m_fair_vn
;
2110 u32 vn_cfg
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
2111 u16 vn_min_rate
, vn_max_rate
;
2114 /* If function is hidden - set min and max to zeroes */
2115 if (vn_cfg
& FUNC_MF_CFG_FUNC_HIDE
) {
2120 vn_min_rate
= ((vn_cfg
& FUNC_MF_CFG_MIN_BW_MASK
) >>
2121 FUNC_MF_CFG_MIN_BW_SHIFT
) * 100;
2122 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2123 if current min rate is zero - set it to 1.
2124 This is a requirment of the algorithm. */
2125 if ((vn_min_rate
== 0) && wsum
)
2126 vn_min_rate
= DEF_MIN_RATE
;
2127 vn_max_rate
= ((vn_cfg
& FUNC_MF_CFG_MAX_BW_MASK
) >>
2128 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
2131 DP(NETIF_MSG_IFUP
, "func %d: vn_min_rate=%d vn_max_rate=%d "
2132 "wsum=%d\n", func
, vn_min_rate
, vn_max_rate
, wsum
);
2134 memset(&m_rs_vn
, 0, sizeof(struct rate_shaping_vars_per_vn
));
2135 memset(&m_fair_vn
, 0, sizeof(struct fairness_vars_per_vn
));
2137 /* global vn counter - maximal Mbps for this vn */
2138 m_rs_vn
.vn_counter
.rate
= vn_max_rate
;
2140 /* quota - number of bytes transmitted in this period */
2141 m_rs_vn
.vn_counter
.quota
=
2142 (vn_max_rate
* RS_PERIODIC_TIMEOUT_USEC
) / 8;
2144 #ifdef BNX2X_PER_PROT_QOS
2145 /* per protocol counter */
2146 for (protocol
= 0; protocol
< NUM_OF_PROTOCOLS
; protocol
++) {
2147 /* maximal Mbps for this protocol */
2148 m_rs_vn
.protocol_counters
[protocol
].rate
=
2149 protocol_max_rate
[protocol
];
2150 /* the quota in each timer period -
2151 number of bytes transmitted in this period */
2152 m_rs_vn
.protocol_counters
[protocol
].quota
=
2153 (u32
)(rs_periodic_timeout_usec
*
2155 protocol_counters
[protocol
].rate
/8));
2160 /* credit for each period of the fairness algorithm:
2161 number of bytes in T_FAIR (the vn share the port rate).
2162 wsum should not be larger than 10000, thus
2163 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2164 m_fair_vn
.vn_credit_delta
=
2165 max((u64
)(vn_min_rate
* (T_FAIR_COEF
/ (8 * wsum
))),
2166 (u64
)(m_cmng_port
->fair_vars
.fair_threshold
* 2));
2167 DP(NETIF_MSG_IFUP
, "m_fair_vn.vn_credit_delta=%d\n",
2168 m_fair_vn
.vn_credit_delta
);
2171 #ifdef BNX2X_PER_PROT_QOS
2173 u32 protocolWeightSum
= 0;
2175 for (protocol
= 0; protocol
< NUM_OF_PROTOCOLS
; protocol
++)
2176 protocolWeightSum
+=
2177 drvInit
.protocol_min_rate
[protocol
];
2178 /* per protocol counter -
2179 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2180 if (protocolWeightSum
> 0) {
2182 protocol
< NUM_OF_PROTOCOLS
; protocol
++)
2183 /* credit for each period of the
2184 fairness algorithm - number of bytes in
2185 T_FAIR (the protocol share the vn rate) */
2186 m_fair_vn
.protocol_credit_delta
[protocol
] =
2187 (u32
)((vn_min_rate
/ 8) * t_fair
*
2188 protocol_min_rate
/ protocolWeightSum
);
2193 /* Store it to internal memory */
2194 for (i
= 0; i
< sizeof(struct rate_shaping_vars_per_vn
)/4; i
++)
2195 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2196 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func
) + i
* 4,
2197 ((u32
*)(&m_rs_vn
))[i
]);
2199 for (i
= 0; i
< sizeof(struct fairness_vars_per_vn
)/4; i
++)
2200 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2201 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func
) + i
* 4,
2202 ((u32
*)(&m_fair_vn
))[i
]);
2205 /* This function is called upon link interrupt */
2206 static void bnx2x_link_attn(struct bnx2x
*bp
)
2210 /* Make sure that we are synced with the current statistics */
2211 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2213 bnx2x_phy_hw_lock(bp
);
2214 bnx2x_link_update(&bp
->link_params
, &bp
->link_vars
);
2215 bnx2x_phy_hw_unlock(bp
);
2217 if (bp
->link_vars
.link_up
) {
2219 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
2220 struct host_port_stats
*pstats
;
2222 pstats
= bnx2x_sp(bp
, port_stats
);
2223 /* reset old bmac stats */
2224 memset(&(pstats
->mac_stx
[0]), 0,
2225 sizeof(struct mac_stx
));
2227 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
2228 (bp
->state
== BNX2X_STATE_DISABLED
))
2229 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2232 /* indicate link status */
2233 bnx2x_link_report(bp
);
2238 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++) {
2239 if (vn
== BP_E1HVN(bp
))
2242 func
= ((vn
<< 1) | BP_PORT(bp
));
2244 /* Set the attention towards other drivers
2246 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_0
+
2247 (LINK_SYNC_ATTENTION_BIT_FUNC_0
+ func
)*4, 1);
2251 if (CHIP_IS_E1H(bp
) && (bp
->link_vars
.line_speed
> 0)) {
2252 struct cmng_struct_per_port m_cmng_port
;
2254 int port
= BP_PORT(bp
);
2256 /* Init RATE SHAPING and FAIRNESS contexts */
2257 wsum
= bnx2x_calc_vn_wsum(bp
);
2258 bnx2x_init_port_minmax(bp
, (int)wsum
,
2259 bp
->link_vars
.line_speed
,
2262 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
2263 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
,
2264 wsum
, bp
->link_vars
.line_speed
,
2269 static void bnx2x__link_status_update(struct bnx2x
*bp
)
2271 if (bp
->state
!= BNX2X_STATE_OPEN
)
2274 bnx2x_link_status_update(&bp
->link_params
, &bp
->link_vars
);
2276 if (bp
->link_vars
.link_up
)
2277 bnx2x_stats_handle(bp
, STATS_EVENT_LINK_UP
);
2279 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
2281 /* indicate link status */
2282 bnx2x_link_report(bp
);
2285 static void bnx2x_pmf_update(struct bnx2x
*bp
)
2287 int port
= BP_PORT(bp
);
2291 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
2293 /* enable nig attention */
2294 val
= (0xff0f | (1 << (BP_E1HVN(bp
) + 4)));
2295 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, val
);
2296 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, val
);
2298 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
2306 * General service functions
2309 /* the slow path queue is odd since completions arrive on the fastpath ring */
2310 static int bnx2x_sp_post(struct bnx2x
*bp
, int command
, int cid
,
2311 u32 data_hi
, u32 data_lo
, int common
)
2313 int func
= BP_FUNC(bp
);
2315 DP(BNX2X_MSG_SP
/*NETIF_MSG_TIMER*/,
2316 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2317 (u32
)U64_HI(bp
->spq_mapping
), (u32
)(U64_LO(bp
->spq_mapping
) +
2318 (void *)bp
->spq_prod_bd
- (void *)bp
->spq
), command
,
2319 HW_CID(bp
, cid
), data_hi
, data_lo
, bp
->spq_left
);
2321 #ifdef BNX2X_STOP_ON_ERROR
2322 if (unlikely(bp
->panic
))
2326 spin_lock_bh(&bp
->spq_lock
);
2328 if (!bp
->spq_left
) {
2329 BNX2X_ERR("BUG! SPQ ring full!\n");
2330 spin_unlock_bh(&bp
->spq_lock
);
2335 /* CID needs port number to be encoded int it */
2336 bp
->spq_prod_bd
->hdr
.conn_and_cmd_data
=
2337 cpu_to_le32(((command
<< SPE_HDR_CMD_ID_SHIFT
) |
2339 bp
->spq_prod_bd
->hdr
.type
= cpu_to_le16(ETH_CONNECTION_TYPE
);
2341 bp
->spq_prod_bd
->hdr
.type
|=
2342 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT
));
2344 bp
->spq_prod_bd
->data
.mac_config_addr
.hi
= cpu_to_le32(data_hi
);
2345 bp
->spq_prod_bd
->data
.mac_config_addr
.lo
= cpu_to_le32(data_lo
);
2349 if (bp
->spq_prod_bd
== bp
->spq_last_bd
) {
2350 bp
->spq_prod_bd
= bp
->spq
;
2351 bp
->spq_prod_idx
= 0;
2352 DP(NETIF_MSG_TIMER
, "end of spq\n");
2359 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_SPQ_PROD_OFFSET(func
),
2362 spin_unlock_bh(&bp
->spq_lock
);
2366 /* acquire split MCP access lock register */
2367 static int bnx2x_lock_alr(struct bnx2x
*bp
)
2374 for (j
= 0; j
< i
*10; j
++) {
2376 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2377 val
= REG_RD(bp
, GRCBASE_MCP
+ 0x9c);
2378 if (val
& (1L << 31))
2383 if (!(val
& (1L << 31))) {
2384 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2391 /* Release split MCP access lock register */
2392 static void bnx2x_unlock_alr(struct bnx2x
*bp
)
2396 REG_WR(bp
, GRCBASE_MCP
+ 0x9c, val
);
2399 static inline u16
bnx2x_update_dsb_idx(struct bnx2x
*bp
)
2401 struct host_def_status_block
*def_sb
= bp
->def_status_blk
;
2404 barrier(); /* status block is written to by the chip */
2406 if (bp
->def_att_idx
!= def_sb
->atten_status_block
.attn_bits_index
) {
2407 bp
->def_att_idx
= def_sb
->atten_status_block
.attn_bits_index
;
2410 if (bp
->def_c_idx
!= def_sb
->c_def_status_block
.status_block_index
) {
2411 bp
->def_c_idx
= def_sb
->c_def_status_block
.status_block_index
;
2414 if (bp
->def_u_idx
!= def_sb
->u_def_status_block
.status_block_index
) {
2415 bp
->def_u_idx
= def_sb
->u_def_status_block
.status_block_index
;
2418 if (bp
->def_x_idx
!= def_sb
->x_def_status_block
.status_block_index
) {
2419 bp
->def_x_idx
= def_sb
->x_def_status_block
.status_block_index
;
2422 if (bp
->def_t_idx
!= def_sb
->t_def_status_block
.status_block_index
) {
2423 bp
->def_t_idx
= def_sb
->t_def_status_block
.status_block_index
;
2430 * slow path service functions
2433 static void bnx2x_attn_int_asserted(struct bnx2x
*bp
, u32 asserted
)
2435 int port
= BP_PORT(bp
);
2436 int func
= BP_FUNC(bp
);
2437 u32 igu_addr
= (IGU_ADDR_ATTN_BITS_SET
+ IGU_FUNC_BASE
* func
) * 8;
2438 u32 aeu_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
2439 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
2440 u32 nig_int_mask_addr
= port
? NIG_REG_MASK_INTERRUPT_PORT1
:
2441 NIG_REG_MASK_INTERRUPT_PORT0
;
2443 if (~bp
->aeu_mask
& (asserted
& 0xff))
2444 BNX2X_ERR("IGU ERROR\n");
2445 if (bp
->attn_state
& asserted
)
2446 BNX2X_ERR("IGU ERROR\n");
2448 DP(NETIF_MSG_HW
, "aeu_mask %x newly asserted %x\n",
2449 bp
->aeu_mask
, asserted
);
2450 bp
->aeu_mask
&= ~(asserted
& 0xff);
2451 DP(NETIF_MSG_HW
, "after masking: aeu_mask %x\n", bp
->aeu_mask
);
2453 REG_WR(bp
, aeu_addr
, bp
->aeu_mask
);
2455 bp
->attn_state
|= asserted
;
2457 if (asserted
& ATTN_HARD_WIRED_MASK
) {
2458 if (asserted
& ATTN_NIG_FOR_FUNC
) {
2460 /* save nig interrupt mask */
2461 bp
->nig_mask
= REG_RD(bp
, nig_int_mask_addr
);
2462 REG_WR(bp
, nig_int_mask_addr
, 0);
2464 bnx2x_link_attn(bp
);
2466 /* handle unicore attn? */
2468 if (asserted
& ATTN_SW_TIMER_4_FUNC
)
2469 DP(NETIF_MSG_HW
, "ATTN_SW_TIMER_4_FUNC!\n");
2471 if (asserted
& GPIO_2_FUNC
)
2472 DP(NETIF_MSG_HW
, "GPIO_2_FUNC!\n");
2474 if (asserted
& GPIO_3_FUNC
)
2475 DP(NETIF_MSG_HW
, "GPIO_3_FUNC!\n");
2477 if (asserted
& GPIO_4_FUNC
)
2478 DP(NETIF_MSG_HW
, "GPIO_4_FUNC!\n");
2481 if (asserted
& ATTN_GENERAL_ATTN_1
) {
2482 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_1!\n");
2483 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_1
, 0x0);
2485 if (asserted
& ATTN_GENERAL_ATTN_2
) {
2486 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_2!\n");
2487 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_2
, 0x0);
2489 if (asserted
& ATTN_GENERAL_ATTN_3
) {
2490 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_3!\n");
2491 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_3
, 0x0);
2494 if (asserted
& ATTN_GENERAL_ATTN_4
) {
2495 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_4!\n");
2496 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_4
, 0x0);
2498 if (asserted
& ATTN_GENERAL_ATTN_5
) {
2499 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_5!\n");
2500 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_5
, 0x0);
2502 if (asserted
& ATTN_GENERAL_ATTN_6
) {
2503 DP(NETIF_MSG_HW
, "ATTN_GENERAL_ATTN_6!\n");
2504 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_6
, 0x0);
2508 } /* if hardwired */
2510 DP(NETIF_MSG_HW
, "about to mask 0x%08x at IGU addr 0x%x\n",
2511 asserted
, BAR_IGU_INTMEM
+ igu_addr
);
2512 REG_WR(bp
, BAR_IGU_INTMEM
+ igu_addr
, asserted
);
2514 /* now set back the mask */
2515 if (asserted
& ATTN_NIG_FOR_FUNC
)
2516 REG_WR(bp
, nig_int_mask_addr
, bp
->nig_mask
);
2519 static inline void bnx2x_attn_int_deasserted0(struct bnx2x
*bp
, u32 attn
)
2521 int port
= BP_PORT(bp
);
2525 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
2526 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
2528 if (attn
& AEU_INPUTS_ATTN_BITS_SPIO5
) {
2530 val
= REG_RD(bp
, reg_offset
);
2531 val
&= ~AEU_INPUTS_ATTN_BITS_SPIO5
;
2532 REG_WR(bp
, reg_offset
, val
);
2534 BNX2X_ERR("SPIO5 hw attention\n");
2536 switch (bp
->common
.board
& SHARED_HW_CFG_BOARD_TYPE_MASK
) {
2537 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G
:
2538 /* Fan failure attention */
2540 /* The PHY reset is controled by GPIO 1 */
2541 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_1
,
2542 MISC_REGISTERS_GPIO_OUTPUT_LOW
);
2543 /* Low power mode is controled by GPIO 2 */
2544 bnx2x_set_gpio(bp
, MISC_REGISTERS_GPIO_2
,
2545 MISC_REGISTERS_GPIO_OUTPUT_LOW
);
2546 /* mark the failure */
2547 bp
->link_params
.ext_phy_config
&=
2548 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK
;
2549 bp
->link_params
.ext_phy_config
|=
2550 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
;
2552 dev_info
.port_hw_config
[port
].
2553 external_phy_config
,
2554 bp
->link_params
.ext_phy_config
);
2555 /* log the failure */
2556 printk(KERN_ERR PFX
"Fan Failure on Network"
2557 " Controller %s has caused the driver to"
2558 " shutdown the card to prevent permanent"
2559 " damage. Please contact Dell Support for"
2560 " assistance\n", bp
->dev
->name
);
2568 if (attn
& HW_INTERRUT_ASSERT_SET_0
) {
2570 val
= REG_RD(bp
, reg_offset
);
2571 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_0
);
2572 REG_WR(bp
, reg_offset
, val
);
2574 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2575 (attn
& HW_INTERRUT_ASSERT_SET_0
));
2580 static inline void bnx2x_attn_int_deasserted1(struct bnx2x
*bp
, u32 attn
)
2584 if (attn
& BNX2X_DOORQ_ASSERT
) {
2586 val
= REG_RD(bp
, DORQ_REG_DORQ_INT_STS_CLR
);
2587 BNX2X_ERR("DB hw attention 0x%x\n", val
);
2588 /* DORQ discard attention */
2590 BNX2X_ERR("FATAL error from DORQ\n");
2593 if (attn
& HW_INTERRUT_ASSERT_SET_1
) {
2595 int port
= BP_PORT(bp
);
2598 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1
:
2599 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1
);
2601 val
= REG_RD(bp
, reg_offset
);
2602 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_1
);
2603 REG_WR(bp
, reg_offset
, val
);
2605 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2606 (attn
& HW_INTERRUT_ASSERT_SET_1
));
2611 static inline void bnx2x_attn_int_deasserted2(struct bnx2x
*bp
, u32 attn
)
2615 if (attn
& AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT
) {
2617 val
= REG_RD(bp
, CFC_REG_CFC_INT_STS_CLR
);
2618 BNX2X_ERR("CFC hw attention 0x%x\n", val
);
2619 /* CFC error attention */
2621 BNX2X_ERR("FATAL error from CFC\n");
2624 if (attn
& AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT
) {
2626 val
= REG_RD(bp
, PXP_REG_PXP_INT_STS_CLR_0
);
2627 BNX2X_ERR("PXP hw attention 0x%x\n", val
);
2628 /* RQ_USDMDP_FIFO_OVERFLOW */
2630 BNX2X_ERR("FATAL error from PXP\n");
2633 if (attn
& HW_INTERRUT_ASSERT_SET_2
) {
2635 int port
= BP_PORT(bp
);
2638 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2
:
2639 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2
);
2641 val
= REG_RD(bp
, reg_offset
);
2642 val
&= ~(attn
& HW_INTERRUT_ASSERT_SET_2
);
2643 REG_WR(bp
, reg_offset
, val
);
2645 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2646 (attn
& HW_INTERRUT_ASSERT_SET_2
));
2651 static inline void bnx2x_attn_int_deasserted3(struct bnx2x
*bp
, u32 attn
)
2655 if (attn
& EVEREST_GEN_ATTN_IN_USE_MASK
) {
2657 if (attn
& BNX2X_PMF_LINK_ASSERT
) {
2658 int func
= BP_FUNC(bp
);
2660 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
2661 bnx2x__link_status_update(bp
);
2662 if (SHMEM_RD(bp
, func_mb
[func
].drv_status
) &
2664 bnx2x_pmf_update(bp
);
2666 } else if (attn
& BNX2X_MC_ASSERT_BITS
) {
2668 BNX2X_ERR("MC assert!\n");
2669 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_10
, 0);
2670 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_9
, 0);
2671 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_8
, 0);
2672 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_7
, 0);
2675 } else if (attn
& BNX2X_MCP_ASSERT
) {
2677 BNX2X_ERR("MCP assert!\n");
2678 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_11
, 0);
2682 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn
);
2685 if (attn
& EVEREST_LATCHED_ATTN_IN_USE_MASK
) {
2686 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn
);
2687 if (attn
& BNX2X_GRC_TIMEOUT
) {
2688 val
= CHIP_IS_E1H(bp
) ?
2689 REG_RD(bp
, MISC_REG_GRC_TIMEOUT_ATTN
) : 0;
2690 BNX2X_ERR("GRC time-out 0x%08x\n", val
);
2692 if (attn
& BNX2X_GRC_RSV
) {
2693 val
= CHIP_IS_E1H(bp
) ?
2694 REG_RD(bp
, MISC_REG_GRC_RSV_ATTN
) : 0;
2695 BNX2X_ERR("GRC reserved 0x%08x\n", val
);
2697 REG_WR(bp
, MISC_REG_AEU_CLR_LATCH_SIGNAL
, 0x7ff);
2701 static void bnx2x_attn_int_deasserted(struct bnx2x
*bp
, u32 deasserted
)
2703 struct attn_route attn
;
2704 struct attn_route group_mask
;
2705 int port
= BP_PORT(bp
);
2710 /* need to take HW lock because MCP or other port might also
2711 try to handle this event */
2714 attn
.sig
[0] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0
+ port
*4);
2715 attn
.sig
[1] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0
+ port
*4);
2716 attn
.sig
[2] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0
+ port
*4);
2717 attn
.sig
[3] = REG_RD(bp
, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0
+ port
*4);
2718 DP(NETIF_MSG_HW
, "attn: %08x %08x %08x %08x\n",
2719 attn
.sig
[0], attn
.sig
[1], attn
.sig
[2], attn
.sig
[3]);
2721 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
2722 if (deasserted
& (1 << index
)) {
2723 group_mask
= bp
->attn_group
[index
];
2725 DP(NETIF_MSG_HW
, "group[%d]: %08x %08x %08x %08x\n",
2726 index
, group_mask
.sig
[0], group_mask
.sig
[1],
2727 group_mask
.sig
[2], group_mask
.sig
[3]);
2729 bnx2x_attn_int_deasserted3(bp
,
2730 attn
.sig
[3] & group_mask
.sig
[3]);
2731 bnx2x_attn_int_deasserted1(bp
,
2732 attn
.sig
[1] & group_mask
.sig
[1]);
2733 bnx2x_attn_int_deasserted2(bp
,
2734 attn
.sig
[2] & group_mask
.sig
[2]);
2735 bnx2x_attn_int_deasserted0(bp
,
2736 attn
.sig
[0] & group_mask
.sig
[0]);
2738 if ((attn
.sig
[0] & group_mask
.sig
[0] &
2739 HW_PRTY_ASSERT_SET_0
) ||
2740 (attn
.sig
[1] & group_mask
.sig
[1] &
2741 HW_PRTY_ASSERT_SET_1
) ||
2742 (attn
.sig
[2] & group_mask
.sig
[2] &
2743 HW_PRTY_ASSERT_SET_2
))
2744 BNX2X_ERR("FATAL HW block parity attention\n");
2748 bnx2x_unlock_alr(bp
);
2750 reg_addr
= (IGU_ADDR_ATTN_BITS_CLR
+ IGU_FUNC_BASE
* BP_FUNC(bp
)) * 8;
2753 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2754 val, BAR_IGU_INTMEM + reg_addr); */
2755 REG_WR(bp
, BAR_IGU_INTMEM
+ reg_addr
, val
);
2757 if (bp
->aeu_mask
& (deasserted
& 0xff))
2758 BNX2X_ERR("IGU BUG!\n");
2759 if (~bp
->attn_state
& deasserted
)
2760 BNX2X_ERR("IGU BUG!\n");
2762 reg_addr
= port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
2763 MISC_REG_AEU_MASK_ATTN_FUNC_0
;
2765 DP(NETIF_MSG_HW
, "aeu_mask %x\n", bp
->aeu_mask
);
2766 bp
->aeu_mask
|= (deasserted
& 0xff);
2768 DP(NETIF_MSG_HW
, "new mask %x\n", bp
->aeu_mask
);
2769 REG_WR(bp
, reg_addr
, bp
->aeu_mask
);
2771 DP(NETIF_MSG_HW
, "attn_state %x\n", bp
->attn_state
);
2772 bp
->attn_state
&= ~deasserted
;
2773 DP(NETIF_MSG_HW
, "new state %x\n", bp
->attn_state
);
2776 static void bnx2x_attn_int(struct bnx2x
*bp
)
2778 /* read local copy of bits */
2779 u32 attn_bits
= bp
->def_status_blk
->atten_status_block
.attn_bits
;
2780 u32 attn_ack
= bp
->def_status_blk
->atten_status_block
.attn_bits_ack
;
2781 u32 attn_state
= bp
->attn_state
;
2783 /* look for changed bits */
2784 u32 asserted
= attn_bits
& ~attn_ack
& ~attn_state
;
2785 u32 deasserted
= ~attn_bits
& attn_ack
& attn_state
;
2788 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2789 attn_bits
, attn_ack
, asserted
, deasserted
);
2791 if (~(attn_bits
^ attn_ack
) & (attn_bits
^ attn_state
))
2792 BNX2X_ERR("BAD attention state\n");
2794 /* handle bits that were raised */
2796 bnx2x_attn_int_asserted(bp
, asserted
);
2799 bnx2x_attn_int_deasserted(bp
, deasserted
);
2802 static void bnx2x_sp_task(struct work_struct
*work
)
2804 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, sp_task
);
2808 /* Return here if interrupt is disabled */
2809 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
2810 DP(BNX2X_MSG_SP
, "called but intr_sem not 0, returning\n");
2814 status
= bnx2x_update_dsb_idx(bp
);
2815 /* if (status == 0) */
2816 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2818 DP(BNX2X_MSG_SP
, "got a slowpath interrupt (updated %x)\n", status
);
2824 /* CStorm events: query_stats, port delete ramrod */
2826 bp
->stats_pending
= 0;
2828 bnx2x_ack_sb(bp
, DEF_SB_ID
, ATTENTION_ID
, bp
->def_att_idx
,
2830 bnx2x_ack_sb(bp
, DEF_SB_ID
, USTORM_ID
, le16_to_cpu(bp
->def_u_idx
),
2832 bnx2x_ack_sb(bp
, DEF_SB_ID
, CSTORM_ID
, le16_to_cpu(bp
->def_c_idx
),
2834 bnx2x_ack_sb(bp
, DEF_SB_ID
, XSTORM_ID
, le16_to_cpu(bp
->def_x_idx
),
2836 bnx2x_ack_sb(bp
, DEF_SB_ID
, TSTORM_ID
, le16_to_cpu(bp
->def_t_idx
),
2841 static irqreturn_t
bnx2x_msix_sp_int(int irq
, void *dev_instance
)
2843 struct net_device
*dev
= dev_instance
;
2844 struct bnx2x
*bp
= netdev_priv(dev
);
2846 /* Return here if interrupt is disabled */
2847 if (unlikely(atomic_read(&bp
->intr_sem
) != 0)) {
2848 DP(BNX2X_MSG_SP
, "called but intr_sem not 0, returning\n");
2852 bnx2x_ack_sb(bp
, DEF_SB_ID
, XSTORM_ID
, 0, IGU_INT_DISABLE
, 0);
2854 #ifdef BNX2X_STOP_ON_ERROR
2855 if (unlikely(bp
->panic
))
2859 schedule_work(&bp
->sp_task
);
2864 /* end of slow path */
2868 /****************************************************************************
2870 ****************************************************************************/
2872 /* sum[hi:lo] += add[hi:lo] */
2873 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2876 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
2879 /* difference = minuend - subtrahend */
2880 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2882 if (m_lo < s_lo) { \
2884 d_hi = m_hi - s_hi; \
2886 /* we can 'loan' 1 */ \
2888 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2890 /* m_hi <= s_hi */ \
2895 /* m_lo >= s_lo */ \
2896 if (m_hi < s_hi) { \
2900 /* m_hi >= s_hi */ \
2901 d_hi = m_hi - s_hi; \
2902 d_lo = m_lo - s_lo; \
2907 #define UPDATE_STAT64(s, t) \
2909 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2910 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2911 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2912 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2913 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2914 pstats->mac_stx[1].t##_lo, diff.lo); \
2917 #define UPDATE_STAT64_NIG(s, t) \
2919 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2920 diff.lo, new->s##_lo, old->s##_lo); \
2921 ADD_64(estats->t##_hi, diff.hi, \
2922 estats->t##_lo, diff.lo); \
2925 /* sum[hi:lo] += add */
2926 #define ADD_EXTEND_64(s_hi, s_lo, a) \
2929 s_hi += (s_lo < a) ? 1 : 0; \
2932 #define UPDATE_EXTEND_STAT(s) \
2934 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2935 pstats->mac_stx[1].s##_lo, \
2939 #define UPDATE_EXTEND_TSTAT(s, t) \
2941 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2942 old_tclient->s = le32_to_cpu(tclient->s); \
2943 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2946 #define UPDATE_EXTEND_XSTAT(s, t) \
2948 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2949 old_xclient->s = le32_to_cpu(xclient->s); \
2950 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2954 * General service functions
2957 static inline long bnx2x_hilo(u32
*hiref
)
2959 u32 lo
= *(hiref
+ 1);
2960 #if (BITS_PER_LONG == 64)
2963 return HILO_U64(hi
, lo
);
2970 * Init service functions
2973 static void bnx2x_storm_stats_init(struct bnx2x
*bp
)
2975 int func
= BP_FUNC(bp
);
2977 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(func
), 1);
2978 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2979 XSTORM_STATS_FLAGS_OFFSET(func
) + 4, 0);
2981 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(func
), 1);
2982 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
2983 TSTORM_STATS_FLAGS_OFFSET(func
) + 4, 0);
2985 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(func
), 0);
2986 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
2987 CSTORM_STATS_FLAGS_OFFSET(func
) + 4, 0);
2989 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2990 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
2991 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
2992 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
2993 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
2994 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
2996 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
2997 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
),
2998 U64_LO(bnx2x_sp_mapping(bp
, fw_stats
)));
2999 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
3000 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func
) + 4,
3001 U64_HI(bnx2x_sp_mapping(bp
, fw_stats
)));
3004 static void bnx2x_storm_stats_post(struct bnx2x
*bp
)
3006 if (!bp
->stats_pending
) {
3007 struct eth_query_ramrod_data ramrod_data
= {0};
3010 ramrod_data
.drv_counter
= bp
->stats_counter
++;
3011 ramrod_data
.collect_port_1b
= bp
->port
.pmf
? 1 : 0;
3012 ramrod_data
.ctr_id_vector
= (1 << BP_CL_ID(bp
));
3014 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_STAT_QUERY
, 0,
3015 ((u32
*)&ramrod_data
)[1],
3016 ((u32
*)&ramrod_data
)[0], 0);
3018 /* stats ramrod has it's own slot on the spq */
3020 bp
->stats_pending
= 1;
3025 static void bnx2x_stats_init(struct bnx2x
*bp
)
3027 int port
= BP_PORT(bp
);
3029 bp
->executer_idx
= 0;
3030 bp
->stats_counter
= 0;
3034 bp
->port
.port_stx
= SHMEM_RD(bp
, port_mb
[port
].port_stx
);
3036 bp
->port
.port_stx
= 0;
3037 DP(BNX2X_MSG_STATS
, "port_stx 0x%x\n", bp
->port
.port_stx
);
3039 memset(&(bp
->port
.old_nig_stats
), 0, sizeof(struct nig_stats
));
3040 bp
->port
.old_nig_stats
.brb_discard
=
3041 REG_RD(bp
, NIG_REG_STAT0_BRB_DISCARD
+ port
*0x38);
3042 REG_RD_DMAE(bp
, NIG_REG_STAT0_EGRESS_MAC_PKT0
+ port
*0x50,
3043 &(bp
->port
.old_nig_stats
.egress_mac_pkt0_lo
), 2);
3044 REG_RD_DMAE(bp
, NIG_REG_STAT0_EGRESS_MAC_PKT1
+ port
*0x50,
3045 &(bp
->port
.old_nig_stats
.egress_mac_pkt1_lo
), 2);
3047 /* function stats */
3048 memset(&bp
->dev
->stats
, 0, sizeof(struct net_device_stats
));
3049 memset(&bp
->old_tclient
, 0, sizeof(struct tstorm_per_client_stats
));
3050 memset(&bp
->old_xclient
, 0, sizeof(struct xstorm_per_client_stats
));
3051 memset(&bp
->eth_stats
, 0, sizeof(struct bnx2x_eth_stats
));
3053 bp
->stats_state
= STATS_STATE_DISABLED
;
3054 if (IS_E1HMF(bp
) && bp
->port
.pmf
&& bp
->port
.port_stx
)
3055 bnx2x_stats_handle(bp
, STATS_EVENT_PMF
);
3058 static void bnx2x_hw_stats_post(struct bnx2x
*bp
)
3060 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3061 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3063 *stats_comp
= DMAE_COMP_VAL
;
3066 if (bp
->executer_idx
) {
3067 int loader_idx
= PMF_DMAE_C(bp
);
3069 memset(dmae
, 0, sizeof(struct dmae_command
));
3071 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3072 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3073 DMAE_CMD_DST_RESET
|
3075 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3077 DMAE_CMD_ENDIANITY_DW_SWAP
|
3079 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
:
3081 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3082 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, dmae
[0]));
3083 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, dmae
[0]));
3084 dmae
->dst_addr_lo
= (DMAE_REG_CMD_MEM
+
3085 sizeof(struct dmae_command
) *
3086 (loader_idx
+ 1)) >> 2;
3087 dmae
->dst_addr_hi
= 0;
3088 dmae
->len
= sizeof(struct dmae_command
) >> 2;
3091 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
+ 1] >> 2;
3092 dmae
->comp_addr_hi
= 0;
3096 bnx2x_post_dmae(bp
, dmae
, loader_idx
);
3098 } else if (bp
->func_stx
) {
3100 bnx2x_post_dmae(bp
, dmae
, INIT_DMAE_C(bp
));
3104 static int bnx2x_stats_comp(struct bnx2x
*bp
)
3106 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3110 while (*stats_comp
!= DMAE_COMP_VAL
) {
3113 BNX2X_ERR("timeout waiting for stats finished\n");
3122 * Statistics service functions
3125 static void bnx2x_stats_pmf_update(struct bnx2x
*bp
)
3127 struct dmae_command
*dmae
;
3129 int loader_idx
= PMF_DMAE_C(bp
);
3130 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3133 if (!IS_E1HMF(bp
) || !bp
->port
.pmf
|| !bp
->port
.port_stx
) {
3134 BNX2X_ERR("BUG!\n");
3138 bp
->executer_idx
= 0;
3140 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3142 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3144 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3146 DMAE_CMD_ENDIANITY_DW_SWAP
|
3148 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3149 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3151 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3152 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3153 dmae
->src_addr_lo
= bp
->port
.port_stx
>> 2;
3154 dmae
->src_addr_hi
= 0;
3155 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3156 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3157 dmae
->len
= DMAE_LEN32_RD_MAX
;
3158 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3159 dmae
->comp_addr_hi
= 0;
3162 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3163 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3164 dmae
->src_addr_lo
= (bp
->port
.port_stx
>> 2) + DMAE_LEN32_RD_MAX
;
3165 dmae
->src_addr_hi
= 0;
3166 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
) +
3167 DMAE_LEN32_RD_MAX
* 4);
3168 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
) +
3169 DMAE_LEN32_RD_MAX
* 4);
3170 dmae
->len
= (sizeof(struct host_port_stats
) >> 2) - DMAE_LEN32_RD_MAX
;
3171 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3172 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3173 dmae
->comp_val
= DMAE_COMP_VAL
;
3176 bnx2x_hw_stats_post(bp
);
3177 bnx2x_stats_comp(bp
);
3180 static void bnx2x_port_stats_init(struct bnx2x
*bp
)
3182 struct dmae_command
*dmae
;
3183 int port
= BP_PORT(bp
);
3184 int vn
= BP_E1HVN(bp
);
3186 int loader_idx
= PMF_DMAE_C(bp
);
3188 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3191 if (!bp
->link_vars
.link_up
|| !bp
->port
.pmf
) {
3192 BNX2X_ERR("BUG!\n");
3196 bp
->executer_idx
= 0;
3199 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3200 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3201 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3203 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3205 DMAE_CMD_ENDIANITY_DW_SWAP
|
3207 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3208 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3210 if (bp
->port
.port_stx
) {
3212 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3213 dmae
->opcode
= opcode
;
3214 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3215 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3216 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3217 dmae
->dst_addr_hi
= 0;
3218 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3219 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3220 dmae
->comp_addr_hi
= 0;
3226 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3227 dmae
->opcode
= opcode
;
3228 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3229 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3230 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3231 dmae
->dst_addr_hi
= 0;
3232 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3233 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3234 dmae
->comp_addr_hi
= 0;
3239 opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3240 DMAE_CMD_C_DST_GRC
| DMAE_CMD_C_ENABLE
|
3241 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3243 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3245 DMAE_CMD_ENDIANITY_DW_SWAP
|
3247 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3248 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3250 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
) {
3252 mac_addr
= (port
? NIG_REG_INGRESS_BMAC1_MEM
:
3253 NIG_REG_INGRESS_BMAC0_MEM
);
3255 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3256 BIGMAC_REGISTER_TX_STAT_GTBYT */
3257 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3258 dmae
->opcode
= opcode
;
3259 dmae
->src_addr_lo
= (mac_addr
+
3260 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3261 dmae
->src_addr_hi
= 0;
3262 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3263 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3264 dmae
->len
= (8 + BIGMAC_REGISTER_TX_STAT_GTBYT
-
3265 BIGMAC_REGISTER_TX_STAT_GTPKT
) >> 2;
3266 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3267 dmae
->comp_addr_hi
= 0;
3270 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3271 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3272 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3273 dmae
->opcode
= opcode
;
3274 dmae
->src_addr_lo
= (mac_addr
+
3275 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3276 dmae
->src_addr_hi
= 0;
3277 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3278 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3279 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3280 offsetof(struct bmac_stats
, rx_stat_gr64_lo
));
3281 dmae
->len
= (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ
-
3282 BIGMAC_REGISTER_RX_STAT_GR64
) >> 2;
3283 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3284 dmae
->comp_addr_hi
= 0;
3287 } else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
) {
3289 mac_addr
= (port
? GRCBASE_EMAC1
: GRCBASE_EMAC0
);
3291 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3292 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3293 dmae
->opcode
= opcode
;
3294 dmae
->src_addr_lo
= (mac_addr
+
3295 EMAC_REG_EMAC_RX_STAT_AC
) >> 2;
3296 dmae
->src_addr_hi
= 0;
3297 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
));
3298 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
));
3299 dmae
->len
= EMAC_REG_EMAC_RX_STAT_AC_COUNT
;
3300 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3301 dmae
->comp_addr_hi
= 0;
3304 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3305 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3306 dmae
->opcode
= opcode
;
3307 dmae
->src_addr_lo
= (mac_addr
+
3308 EMAC_REG_EMAC_RX_STAT_AC_28
) >> 2;
3309 dmae
->src_addr_hi
= 0;
3310 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3311 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3312 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3313 offsetof(struct emac_stats
, rx_stat_falsecarriererrors
));
3315 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3316 dmae
->comp_addr_hi
= 0;
3319 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3320 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3321 dmae
->opcode
= opcode
;
3322 dmae
->src_addr_lo
= (mac_addr
+
3323 EMAC_REG_EMAC_TX_STAT_AC
) >> 2;
3324 dmae
->src_addr_hi
= 0;
3325 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, mac_stats
) +
3326 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3327 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, mac_stats
) +
3328 offsetof(struct emac_stats
, tx_stat_ifhcoutoctets
));
3329 dmae
->len
= EMAC_REG_EMAC_TX_STAT_AC_COUNT
;
3330 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3331 dmae
->comp_addr_hi
= 0;
3336 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3337 dmae
->opcode
= opcode
;
3338 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_BRB_DISCARD
:
3339 NIG_REG_STAT0_BRB_DISCARD
) >> 2;
3340 dmae
->src_addr_hi
= 0;
3341 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
));
3342 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
));
3343 dmae
->len
= (sizeof(struct nig_stats
) - 4*sizeof(u32
)) >> 2;
3344 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3345 dmae
->comp_addr_hi
= 0;
3348 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3349 dmae
->opcode
= opcode
;
3350 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT0
:
3351 NIG_REG_STAT0_EGRESS_MAC_PKT0
) >> 2;
3352 dmae
->src_addr_hi
= 0;
3353 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3354 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3355 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3356 offsetof(struct nig_stats
, egress_mac_pkt0_lo
));
3357 dmae
->len
= (2*sizeof(u32
)) >> 2;
3358 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3359 dmae
->comp_addr_hi
= 0;
3362 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3363 dmae
->opcode
= (DMAE_CMD_SRC_GRC
| DMAE_CMD_DST_PCI
|
3364 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3365 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3367 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3369 DMAE_CMD_ENDIANITY_DW_SWAP
|
3371 (port
? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3372 (vn
<< DMAE_CMD_E1HVN_SHIFT
));
3373 dmae
->src_addr_lo
= (port
? NIG_REG_STAT1_EGRESS_MAC_PKT1
:
3374 NIG_REG_STAT0_EGRESS_MAC_PKT1
) >> 2;
3375 dmae
->src_addr_hi
= 0;
3376 dmae
->dst_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, nig_stats
) +
3377 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3378 dmae
->dst_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, nig_stats
) +
3379 offsetof(struct nig_stats
, egress_mac_pkt1_lo
));
3380 dmae
->len
= (2*sizeof(u32
)) >> 2;
3381 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3382 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3383 dmae
->comp_val
= DMAE_COMP_VAL
;
3388 static void bnx2x_func_stats_init(struct bnx2x
*bp
)
3390 struct dmae_command
*dmae
= &bp
->stats_dmae
;
3391 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3394 if (!bp
->func_stx
) {
3395 BNX2X_ERR("BUG!\n");
3399 bp
->executer_idx
= 0;
3400 memset(dmae
, 0, sizeof(struct dmae_command
));
3402 dmae
->opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3403 DMAE_CMD_C_DST_PCI
| DMAE_CMD_C_ENABLE
|
3404 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3406 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3408 DMAE_CMD_ENDIANITY_DW_SWAP
|
3410 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3411 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3412 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3413 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3414 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3415 dmae
->dst_addr_hi
= 0;
3416 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3417 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3418 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3419 dmae
->comp_val
= DMAE_COMP_VAL
;
3424 static void bnx2x_stats_start(struct bnx2x
*bp
)
3427 bnx2x_port_stats_init(bp
);
3429 else if (bp
->func_stx
)
3430 bnx2x_func_stats_init(bp
);
3432 bnx2x_hw_stats_post(bp
);
3433 bnx2x_storm_stats_post(bp
);
/* PMF handover: drain any outstanding DMAE, pull the port stats baseline
 * from shared memory, then (re)start collection. */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
/* Restart statistics collection after waiting for the previous DMAE
 * cycle to complete. */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3449 static void bnx2x_bmac_stats_update(struct bnx2x
*bp
)
3451 struct bmac_stats
*new = bnx2x_sp(bp
, mac_stats
.bmac_stats
);
3452 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3453 struct regpair diff
;
3455 UPDATE_STAT64(rx_stat_grerb
, rx_stat_ifhcinbadoctets
);
3456 UPDATE_STAT64(rx_stat_grfcs
, rx_stat_dot3statsfcserrors
);
3457 UPDATE_STAT64(rx_stat_grund
, rx_stat_etherstatsundersizepkts
);
3458 UPDATE_STAT64(rx_stat_grovr
, rx_stat_dot3statsframestoolong
);
3459 UPDATE_STAT64(rx_stat_grfrg
, rx_stat_etherstatsfragments
);
3460 UPDATE_STAT64(rx_stat_grjbr
, rx_stat_etherstatsjabbers
);
3461 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_bmac_xpf
);
3462 UPDATE_STAT64(rx_stat_grxcf
, rx_stat_bmac_xcf
);
3463 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffstateentered
);
3464 UPDATE_STAT64(rx_stat_grxpf
, rx_stat_xoffpauseframesreceived
);
3465 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_outxoffsent
);
3466 UPDATE_STAT64(tx_stat_gtxpf
, tx_stat_flowcontroldone
);
3467 UPDATE_STAT64(tx_stat_gt64
, tx_stat_etherstatspkts64octets
);
3468 UPDATE_STAT64(tx_stat_gt127
,
3469 tx_stat_etherstatspkts65octetsto127octets
);
3470 UPDATE_STAT64(tx_stat_gt255
,
3471 tx_stat_etherstatspkts128octetsto255octets
);
3472 UPDATE_STAT64(tx_stat_gt511
,
3473 tx_stat_etherstatspkts256octetsto511octets
);
3474 UPDATE_STAT64(tx_stat_gt1023
,
3475 tx_stat_etherstatspkts512octetsto1023octets
);
3476 UPDATE_STAT64(tx_stat_gt1518
,
3477 tx_stat_etherstatspkts1024octetsto1522octets
);
3478 UPDATE_STAT64(tx_stat_gt2047
, tx_stat_bmac_2047
);
3479 UPDATE_STAT64(tx_stat_gt4095
, tx_stat_bmac_4095
);
3480 UPDATE_STAT64(tx_stat_gt9216
, tx_stat_bmac_9216
);
3481 UPDATE_STAT64(tx_stat_gt16383
, tx_stat_bmac_16383
);
3482 UPDATE_STAT64(tx_stat_gterr
,
3483 tx_stat_dot3statsinternalmactransmiterrors
);
3484 UPDATE_STAT64(tx_stat_gtufl
, tx_stat_bmac_ufl
);
3487 static void bnx2x_emac_stats_update(struct bnx2x
*bp
)
3489 struct emac_stats
*new = bnx2x_sp(bp
, mac_stats
.emac_stats
);
3490 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3492 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets
);
3493 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets
);
3494 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors
);
3495 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors
);
3496 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors
);
3497 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors
);
3498 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts
);
3499 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong
);
3500 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments
);
3501 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers
);
3502 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived
);
3503 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered
);
3504 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived
);
3505 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived
);
3506 UPDATE_EXTEND_STAT(tx_stat_outxonsent
);
3507 UPDATE_EXTEND_STAT(tx_stat_outxoffsent
);
3508 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone
);
3509 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions
);
3510 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes
);
3511 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes
);
3512 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions
);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions
);
3514 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions
);
3515 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets
);
3516 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets
);
3517 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets
);
3518 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets
);
3519 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets
);
3520 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets
);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets
);
3522 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors
);
3525 static int bnx2x_hw_stats_update(struct bnx2x
*bp
)
3527 struct nig_stats
*new = bnx2x_sp(bp
, nig_stats
);
3528 struct nig_stats
*old
= &(bp
->port
.old_nig_stats
);
3529 struct host_port_stats
*pstats
= bnx2x_sp(bp
, port_stats
);
3530 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3531 struct regpair diff
;
3533 if (bp
->link_vars
.mac_type
== MAC_TYPE_BMAC
)
3534 bnx2x_bmac_stats_update(bp
);
3536 else if (bp
->link_vars
.mac_type
== MAC_TYPE_EMAC
)
3537 bnx2x_emac_stats_update(bp
);
3539 else { /* unreached */
3540 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3544 ADD_EXTEND_64(pstats
->brb_drop_hi
, pstats
->brb_drop_lo
,
3545 new->brb_discard
- old
->brb_discard
);
3547 UPDATE_STAT64_NIG(egress_mac_pkt0
,
3548 etherstatspkts1024octetsto1522octets
);
3549 UPDATE_STAT64_NIG(egress_mac_pkt1
, etherstatspktsover1522octets
);
3551 memcpy(old
, new, sizeof(struct nig_stats
));
3553 memcpy(&(estats
->rx_stat_ifhcinbadoctets_hi
), &(pstats
->mac_stx
[1]),
3554 sizeof(struct mac_stx
));
3555 estats
->brb_drop_hi
= pstats
->brb_drop_hi
;
3556 estats
->brb_drop_lo
= pstats
->brb_drop_lo
;
3558 pstats
->host_port_stats_start
= ++pstats
->host_port_stats_end
;
3563 static int bnx2x_storm_stats_update(struct bnx2x
*bp
)
3565 struct eth_stats_query
*stats
= bnx2x_sp(bp
, fw_stats
);
3566 int cl_id
= BP_CL_ID(bp
);
3567 struct tstorm_per_port_stats
*tport
=
3568 &stats
->tstorm_common
.port_statistics
;
3569 struct tstorm_per_client_stats
*tclient
=
3570 &stats
->tstorm_common
.client_statistics
[cl_id
];
3571 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3572 struct xstorm_per_client_stats
*xclient
=
3573 &stats
->xstorm_common
.client_statistics
[cl_id
];
3574 struct xstorm_per_client_stats
*old_xclient
= &bp
->old_xclient
;
3575 struct host_func_stats
*fstats
= bnx2x_sp(bp
, func_stats
);
3576 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3579 /* are storm stats valid? */
3580 if ((u16
)(le16_to_cpu(tclient
->stats_counter
) + 1) !=
3581 bp
->stats_counter
) {
3582 DP(BNX2X_MSG_STATS
, "stats not updated by tstorm"
3583 " tstorm counter (%d) != stats_counter (%d)\n",
3584 tclient
->stats_counter
, bp
->stats_counter
);
3587 if ((u16
)(le16_to_cpu(xclient
->stats_counter
) + 1) !=
3588 bp
->stats_counter
) {
3589 DP(BNX2X_MSG_STATS
, "stats not updated by xstorm"
3590 " xstorm counter (%d) != stats_counter (%d)\n",
3591 xclient
->stats_counter
, bp
->stats_counter
);
3595 fstats
->total_bytes_received_hi
=
3596 fstats
->valid_bytes_received_hi
=
3597 le32_to_cpu(tclient
->total_rcv_bytes
.hi
);
3598 fstats
->total_bytes_received_lo
=
3599 fstats
->valid_bytes_received_lo
=
3600 le32_to_cpu(tclient
->total_rcv_bytes
.lo
);
3602 estats
->error_bytes_received_hi
=
3603 le32_to_cpu(tclient
->rcv_error_bytes
.hi
);
3604 estats
->error_bytes_received_lo
=
3605 le32_to_cpu(tclient
->rcv_error_bytes
.lo
);
3606 ADD_64(estats
->error_bytes_received_hi
,
3607 estats
->rx_stat_ifhcinbadoctets_hi
,
3608 estats
->error_bytes_received_lo
,
3609 estats
->rx_stat_ifhcinbadoctets_lo
);
3611 ADD_64(fstats
->total_bytes_received_hi
,
3612 estats
->error_bytes_received_hi
,
3613 fstats
->total_bytes_received_lo
,
3614 estats
->error_bytes_received_lo
);
3616 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts
, total_unicast_packets_received
);
3617 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts
,
3618 total_multicast_packets_received
);
3619 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts
,
3620 total_broadcast_packets_received
);
3622 fstats
->total_bytes_transmitted_hi
=
3623 le32_to_cpu(xclient
->total_sent_bytes
.hi
);
3624 fstats
->total_bytes_transmitted_lo
=
3625 le32_to_cpu(xclient
->total_sent_bytes
.lo
);
3627 UPDATE_EXTEND_XSTAT(unicast_pkts_sent
,
3628 total_unicast_packets_transmitted
);
3629 UPDATE_EXTEND_XSTAT(multicast_pkts_sent
,
3630 total_multicast_packets_transmitted
);
3631 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent
,
3632 total_broadcast_packets_transmitted
);
3634 memcpy(estats
, &(fstats
->total_bytes_received_hi
),
3635 sizeof(struct host_func_stats
) - 2*sizeof(u32
));
3637 estats
->mac_filter_discard
= le32_to_cpu(tport
->mac_filter_discard
);
3638 estats
->xxoverflow_discard
= le32_to_cpu(tport
->xxoverflow_discard
);
3639 estats
->brb_truncate_discard
=
3640 le32_to_cpu(tport
->brb_truncate_discard
);
3641 estats
->mac_discard
= le32_to_cpu(tport
->mac_discard
);
3643 old_tclient
->rcv_unicast_bytes
.hi
=
3644 le32_to_cpu(tclient
->rcv_unicast_bytes
.hi
);
3645 old_tclient
->rcv_unicast_bytes
.lo
=
3646 le32_to_cpu(tclient
->rcv_unicast_bytes
.lo
);
3647 old_tclient
->rcv_broadcast_bytes
.hi
=
3648 le32_to_cpu(tclient
->rcv_broadcast_bytes
.hi
);
3649 old_tclient
->rcv_broadcast_bytes
.lo
=
3650 le32_to_cpu(tclient
->rcv_broadcast_bytes
.lo
);
3651 old_tclient
->rcv_multicast_bytes
.hi
=
3652 le32_to_cpu(tclient
->rcv_multicast_bytes
.hi
);
3653 old_tclient
->rcv_multicast_bytes
.lo
=
3654 le32_to_cpu(tclient
->rcv_multicast_bytes
.lo
);
3655 old_tclient
->total_rcv_pkts
= le32_to_cpu(tclient
->total_rcv_pkts
);
3657 old_tclient
->checksum_discard
= le32_to_cpu(tclient
->checksum_discard
);
3658 old_tclient
->packets_too_big_discard
=
3659 le32_to_cpu(tclient
->packets_too_big_discard
);
3660 estats
->no_buff_discard
=
3661 old_tclient
->no_buff_discard
= le32_to_cpu(tclient
->no_buff_discard
);
3662 old_tclient
->ttl0_discard
= le32_to_cpu(tclient
->ttl0_discard
);
3664 old_xclient
->total_sent_pkts
= le32_to_cpu(xclient
->total_sent_pkts
);
3665 old_xclient
->unicast_bytes_sent
.hi
=
3666 le32_to_cpu(xclient
->unicast_bytes_sent
.hi
);
3667 old_xclient
->unicast_bytes_sent
.lo
=
3668 le32_to_cpu(xclient
->unicast_bytes_sent
.lo
);
3669 old_xclient
->multicast_bytes_sent
.hi
=
3670 le32_to_cpu(xclient
->multicast_bytes_sent
.hi
);
3671 old_xclient
->multicast_bytes_sent
.lo
=
3672 le32_to_cpu(xclient
->multicast_bytes_sent
.lo
);
3673 old_xclient
->broadcast_bytes_sent
.hi
=
3674 le32_to_cpu(xclient
->broadcast_bytes_sent
.hi
);
3675 old_xclient
->broadcast_bytes_sent
.lo
=
3676 le32_to_cpu(xclient
->broadcast_bytes_sent
.lo
);
3678 fstats
->host_func_stats_start
= ++fstats
->host_func_stats_end
;
3683 static void bnx2x_net_stats_update(struct bnx2x
*bp
)
3685 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3686 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3687 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3689 nstats
->rx_packets
=
3690 bnx2x_hilo(&estats
->total_unicast_packets_received_hi
) +
3691 bnx2x_hilo(&estats
->total_multicast_packets_received_hi
) +
3692 bnx2x_hilo(&estats
->total_broadcast_packets_received_hi
);
3694 nstats
->tx_packets
=
3695 bnx2x_hilo(&estats
->total_unicast_packets_transmitted_hi
) +
3696 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
) +
3697 bnx2x_hilo(&estats
->total_broadcast_packets_transmitted_hi
);
3699 nstats
->rx_bytes
= bnx2x_hilo(&estats
->valid_bytes_received_hi
);
3701 nstats
->tx_bytes
= bnx2x_hilo(&estats
->total_bytes_transmitted_hi
);
3703 nstats
->rx_dropped
= old_tclient
->checksum_discard
+
3704 estats
->mac_discard
;
3705 nstats
->tx_dropped
= 0;
3708 bnx2x_hilo(&estats
->total_multicast_packets_transmitted_hi
);
3710 nstats
->collisions
=
3711 estats
->tx_stat_dot3statssinglecollisionframes_lo
+
3712 estats
->tx_stat_dot3statsmultiplecollisionframes_lo
+
3713 estats
->tx_stat_dot3statslatecollisions_lo
+
3714 estats
->tx_stat_dot3statsexcessivecollisions_lo
;
3716 estats
->jabber_packets_received
=
3717 old_tclient
->packets_too_big_discard
+
3718 estats
->rx_stat_dot3statsframestoolong_lo
;
3720 nstats
->rx_length_errors
=
3721 estats
->rx_stat_etherstatsundersizepkts_lo
+
3722 estats
->jabber_packets_received
;
3723 nstats
->rx_over_errors
= estats
->brb_drop_lo
+
3724 estats
->brb_truncate_discard
;
3725 nstats
->rx_crc_errors
= estats
->rx_stat_dot3statsfcserrors_lo
;
3726 nstats
->rx_frame_errors
= estats
->rx_stat_dot3statsalignmenterrors_lo
;
3727 nstats
->rx_fifo_errors
= old_tclient
->no_buff_discard
;
3728 nstats
->rx_missed_errors
= estats
->xxoverflow_discard
;
3730 nstats
->rx_errors
= nstats
->rx_length_errors
+
3731 nstats
->rx_over_errors
+
3732 nstats
->rx_crc_errors
+
3733 nstats
->rx_frame_errors
+
3734 nstats
->rx_fifo_errors
+
3735 nstats
->rx_missed_errors
;
3737 nstats
->tx_aborted_errors
=
3738 estats
->tx_stat_dot3statslatecollisions_lo
+
3739 estats
->tx_stat_dot3statsexcessivecollisions_lo
;
3740 nstats
->tx_carrier_errors
= estats
->rx_stat_falsecarriererrors_lo
;
3741 nstats
->tx_fifo_errors
= 0;
3742 nstats
->tx_heartbeat_errors
= 0;
3743 nstats
->tx_window_errors
= 0;
3745 nstats
->tx_errors
= nstats
->tx_aborted_errors
+
3746 nstats
->tx_carrier_errors
;
3749 static void bnx2x_stats_update(struct bnx2x
*bp
)
3751 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3754 if (*stats_comp
!= DMAE_COMP_VAL
)
3758 update
= (bnx2x_hw_stats_update(bp
) == 0);
3760 update
|= (bnx2x_storm_stats_update(bp
) == 0);
3763 bnx2x_net_stats_update(bp
);
3766 if (bp
->stats_pending
) {
3767 bp
->stats_pending
++;
3768 if (bp
->stats_pending
== 3) {
3769 BNX2X_ERR("stats not updated for 3 times\n");
3776 if (bp
->msglevel
& NETIF_MSG_TIMER
) {
3777 struct tstorm_per_client_stats
*old_tclient
= &bp
->old_tclient
;
3778 struct bnx2x_eth_stats
*estats
= &bp
->eth_stats
;
3779 struct net_device_stats
*nstats
= &bp
->dev
->stats
;
3782 printk(KERN_DEBUG
"%s:\n", bp
->dev
->name
);
3783 printk(KERN_DEBUG
" tx avail (%4x) tx hc idx (%x)"
3785 bnx2x_tx_avail(bp
->fp
),
3786 le16_to_cpu(*bp
->fp
->tx_cons_sb
), nstats
->tx_packets
);
3787 printk(KERN_DEBUG
" rx usage (%4x) rx hc idx (%x)"
3789 (u16
)(le16_to_cpu(*bp
->fp
->rx_cons_sb
) -
3790 bp
->fp
->rx_comp_cons
),
3791 le16_to_cpu(*bp
->fp
->rx_cons_sb
), nstats
->rx_packets
);
3792 printk(KERN_DEBUG
" %s (Xoff events %u) brb drops %u\n",
3793 netif_queue_stopped(bp
->dev
)? "Xoff" : "Xon",
3794 estats
->driver_xoff
, estats
->brb_drop_lo
);
3795 printk(KERN_DEBUG
"tstats: checksum_discard %u "
3796 "packets_too_big_discard %u no_buff_discard %u "
3797 "mac_discard %u mac_filter_discard %u "
3798 "xxovrflow_discard %u brb_truncate_discard %u "
3799 "ttl0_discard %u\n",
3800 old_tclient
->checksum_discard
,
3801 old_tclient
->packets_too_big_discard
,
3802 old_tclient
->no_buff_discard
, estats
->mac_discard
,
3803 estats
->mac_filter_discard
, estats
->xxoverflow_discard
,
3804 estats
->brb_truncate_discard
,
3805 old_tclient
->ttl0_discard
);
3807 for_each_queue(bp
, i
) {
3808 printk(KERN_DEBUG
"[%d]: %lu\t%lu\t%lu\n", i
,
3809 bnx2x_fp(bp
, i
, tx_pkt
),
3810 bnx2x_fp(bp
, i
, rx_pkt
),
3811 bnx2x_fp(bp
, i
, rx_calls
));
3815 bnx2x_hw_stats_post(bp
);
3816 bnx2x_storm_stats_post(bp
);
3819 static void bnx2x_port_stats_stop(struct bnx2x
*bp
)
3821 struct dmae_command
*dmae
;
3823 int loader_idx
= PMF_DMAE_C(bp
);
3824 u32
*stats_comp
= bnx2x_sp(bp
, stats_comp
);
3826 bp
->executer_idx
= 0;
3828 opcode
= (DMAE_CMD_SRC_PCI
| DMAE_CMD_DST_GRC
|
3830 DMAE_CMD_SRC_RESET
| DMAE_CMD_DST_RESET
|
3832 DMAE_CMD_ENDIANITY_B_DW_SWAP
|
3834 DMAE_CMD_ENDIANITY_DW_SWAP
|
3836 (BP_PORT(bp
) ? DMAE_CMD_PORT_1
: DMAE_CMD_PORT_0
) |
3837 (BP_E1HVN(bp
) << DMAE_CMD_E1HVN_SHIFT
));
3839 if (bp
->port
.port_stx
) {
3841 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3843 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_GRC
);
3845 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3846 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, port_stats
));
3847 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, port_stats
));
3848 dmae
->dst_addr_lo
= bp
->port
.port_stx
>> 2;
3849 dmae
->dst_addr_hi
= 0;
3850 dmae
->len
= sizeof(struct host_port_stats
) >> 2;
3852 dmae
->comp_addr_lo
= dmae_reg_go_c
[loader_idx
] >> 2;
3853 dmae
->comp_addr_hi
= 0;
3856 dmae
->comp_addr_lo
=
3857 U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3858 dmae
->comp_addr_hi
=
3859 U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3860 dmae
->comp_val
= DMAE_COMP_VAL
;
3868 dmae
= bnx2x_sp(bp
, dmae
[bp
->executer_idx
++]);
3869 dmae
->opcode
= (opcode
| DMAE_CMD_C_DST_PCI
);
3870 dmae
->src_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, func_stats
));
3871 dmae
->src_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, func_stats
));
3872 dmae
->dst_addr_lo
= bp
->func_stx
>> 2;
3873 dmae
->dst_addr_hi
= 0;
3874 dmae
->len
= sizeof(struct host_func_stats
) >> 2;
3875 dmae
->comp_addr_lo
= U64_LO(bnx2x_sp_mapping(bp
, stats_comp
));
3876 dmae
->comp_addr_hi
= U64_HI(bnx2x_sp_mapping(bp
, stats_comp
));
3877 dmae
->comp_val
= DMAE_COMP_VAL
;
3883 static void bnx2x_stats_stop(struct bnx2x
*bp
)
3887 bnx2x_stats_comp(bp
);
3890 update
= (bnx2x_hw_stats_update(bp
) == 0);
3892 update
|= (bnx2x_storm_stats_update(bp
) == 0);
3895 bnx2x_net_stats_update(bp
);
3898 bnx2x_port_stats_stop(bp
);
3900 bnx2x_hw_stats_post(bp
);
3901 bnx2x_stats_comp(bp
);
/* No-op action for state machine transitions that require no work. */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
3909 static const struct {
3910 void (*action
)(struct bnx2x
*bp
);
3911 enum bnx2x_stats_state next_state
;
3912 } bnx2x_stats_stm
[STATS_STATE_MAX
][STATS_EVENT_MAX
] = {
3915 /* DISABLED PMF */ {bnx2x_stats_pmf_update
, STATS_STATE_DISABLED
},
3916 /* LINK_UP */ {bnx2x_stats_start
, STATS_STATE_ENABLED
},
3917 /* UPDATE */ {bnx2x_stats_do_nothing
, STATS_STATE_DISABLED
},
3918 /* STOP */ {bnx2x_stats_do_nothing
, STATS_STATE_DISABLED
}
3921 /* ENABLED PMF */ {bnx2x_stats_pmf_start
, STATS_STATE_ENABLED
},
3922 /* LINK_UP */ {bnx2x_stats_restart
, STATS_STATE_ENABLED
},
3923 /* UPDATE */ {bnx2x_stats_update
, STATS_STATE_ENABLED
},
3924 /* STOP */ {bnx2x_stats_stop
, STATS_STATE_DISABLED
}
3928 static void bnx2x_stats_handle(struct bnx2x
*bp
, enum bnx2x_stats_event event
)
3930 enum bnx2x_stats_state state
= bp
->stats_state
;
3932 bnx2x_stats_stm
[state
][event
].action(bp
);
3933 bp
->stats_state
= bnx2x_stats_stm
[state
][event
].next_state
;
3935 if ((event
!= STATS_EVENT_UPDATE
) || (bp
->msglevel
& NETIF_MSG_TIMER
))
3936 DP(BNX2X_MSG_STATS
, "state %d -> event %d -> state %d\n",
3937 state
, event
, bp
->stats_state
);
3940 static void bnx2x_timer(unsigned long data
)
3942 struct bnx2x
*bp
= (struct bnx2x
*) data
;
3944 if (!netif_running(bp
->dev
))
3947 if (atomic_read(&bp
->intr_sem
) != 0)
3951 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
3954 bnx2x_tx_int(fp
, 1000);
3955 rc
= bnx2x_rx_int(fp
, 1000);
3958 if (!BP_NOMCP(bp
)) {
3959 int func
= BP_FUNC(bp
);
3963 ++bp
->fw_drv_pulse_wr_seq
;
3964 bp
->fw_drv_pulse_wr_seq
&= DRV_PULSE_SEQ_MASK
;
3965 /* TBD - add SYSTEM_TIME */
3966 drv_pulse
= bp
->fw_drv_pulse_wr_seq
;
3967 SHMEM_WR(bp
, func_mb
[func
].drv_pulse_mb
, drv_pulse
);
3969 mcp_pulse
= (SHMEM_RD(bp
, func_mb
[func
].mcp_pulse_mb
) &
3970 MCP_PULSE_SEQ_MASK
);
3971 /* The delta between driver pulse and mcp response
3972 * should be 1 (before mcp response) or 0 (after mcp response)
3974 if ((drv_pulse
!= mcp_pulse
) &&
3975 (drv_pulse
!= ((mcp_pulse
+ 1) & MCP_PULSE_SEQ_MASK
))) {
3976 /* someone lost a heartbeat... */
3977 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3978 drv_pulse
, mcp_pulse
);
3982 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
3983 (bp
->state
== BNX2X_STATE_DISABLED
))
3984 bnx2x_stats_handle(bp
, STATS_EVENT_UPDATE
);
3987 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
3990 /* end of Statistics */
3995 * nic init service functions
3998 static void bnx2x_zero_sb(struct bnx2x
*bp
, int sb_id
)
4000 int port
= BP_PORT(bp
);
4002 bnx2x_init_fill(bp
, BAR_USTRORM_INTMEM
+
4003 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), 0,
4004 sizeof(struct ustorm_def_status_block
)/4);
4005 bnx2x_init_fill(bp
, BAR_CSTRORM_INTMEM
+
4006 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), 0,
4007 sizeof(struct cstorm_def_status_block
)/4);
4010 static void bnx2x_init_sb(struct bnx2x
*bp
, int sb_id
,
4011 struct host_status_block
*sb
, dma_addr_t mapping
)
4013 int port
= BP_PORT(bp
);
4014 int func
= BP_FUNC(bp
);
4019 section
= ((u64
)mapping
) + offsetof(struct host_status_block
,
4021 sb
->u_status_block
.status_block_id
= sb_id
;
4023 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4024 USTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
), U64_LO(section
));
4025 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4026 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
)) + 4),
4028 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ FP_USB_FUNC_OFF
+
4029 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), func
);
4031 for (index
= 0; index
< HC_USTORM_SB_NUM_INDICES
; index
++)
4032 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4033 USTORM_SB_HC_DISABLE_OFFSET(port
, sb_id
, index
), 1);
4036 section
= ((u64
)mapping
) + offsetof(struct host_status_block
,
4038 sb
->c_status_block
.status_block_id
= sb_id
;
4040 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4041 CSTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
), U64_LO(section
));
4042 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4043 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port
, sb_id
)) + 4),
4045 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ FP_CSB_FUNC_OFF
+
4046 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port
, sb_id
), func
);
4048 for (index
= 0; index
< HC_CSTORM_SB_NUM_INDICES
; index
++)
4049 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4050 CSTORM_SB_HC_DISABLE_OFFSET(port
, sb_id
, index
), 1);
4052 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
4055 static void bnx2x_zero_def_sb(struct bnx2x
*bp
)
4057 int func
= BP_FUNC(bp
);
4059 bnx2x_init_fill(bp
, BAR_USTRORM_INTMEM
+
4060 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), 0,
4061 sizeof(struct ustorm_def_status_block
)/4);
4062 bnx2x_init_fill(bp
, BAR_CSTRORM_INTMEM
+
4063 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), 0,
4064 sizeof(struct cstorm_def_status_block
)/4);
4065 bnx2x_init_fill(bp
, BAR_XSTRORM_INTMEM
+
4066 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), 0,
4067 sizeof(struct xstorm_def_status_block
)/4);
4068 bnx2x_init_fill(bp
, BAR_TSTRORM_INTMEM
+
4069 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), 0,
4070 sizeof(struct tstorm_def_status_block
)/4);
4073 static void bnx2x_init_def_sb(struct bnx2x
*bp
,
4074 struct host_def_status_block
*def_sb
,
4075 dma_addr_t mapping
, int sb_id
)
4077 int port
= BP_PORT(bp
);
4078 int func
= BP_FUNC(bp
);
4079 int index
, val
, reg_offset
;
4083 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4084 atten_status_block
);
4085 def_sb
->atten_status_block
.status_block_id
= sb_id
;
4087 bp
->def_att_idx
= 0;
4090 reg_offset
= (port
? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0
:
4091 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
4093 for (index
= 0; index
< MAX_DYNAMIC_ATTN_GRPS
; index
++) {
4094 bp
->attn_group
[index
].sig
[0] = REG_RD(bp
,
4095 reg_offset
+ 0x10*index
);
4096 bp
->attn_group
[index
].sig
[1] = REG_RD(bp
,
4097 reg_offset
+ 0x4 + 0x10*index
);
4098 bp
->attn_group
[index
].sig
[2] = REG_RD(bp
,
4099 reg_offset
+ 0x8 + 0x10*index
);
4100 bp
->attn_group
[index
].sig
[3] = REG_RD(bp
,
4101 reg_offset
+ 0xc + 0x10*index
);
4104 bp
->aeu_mask
= REG_RD(bp
, (port
? MISC_REG_AEU_MASK_ATTN_FUNC_1
:
4105 MISC_REG_AEU_MASK_ATTN_FUNC_0
));
4107 reg_offset
= (port
? HC_REG_ATTN_MSG1_ADDR_L
:
4108 HC_REG_ATTN_MSG0_ADDR_L
);
4110 REG_WR(bp
, reg_offset
, U64_LO(section
));
4111 REG_WR(bp
, reg_offset
+ 4, U64_HI(section
));
4113 reg_offset
= (port
? HC_REG_ATTN_NUM_P1
: HC_REG_ATTN_NUM_P0
);
4115 val
= REG_RD(bp
, reg_offset
);
4117 REG_WR(bp
, reg_offset
, val
);
4120 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4121 u_def_status_block
);
4122 def_sb
->u_def_status_block
.status_block_id
= sb_id
;
4126 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4127 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4128 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4129 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4131 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ DEF_USB_FUNC_OFF
+
4132 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4133 REG_WR(bp
, BAR_USTRORM_INTMEM
+ USTORM_HC_BTR_OFFSET(func
),
4136 for (index
= 0; index
< HC_USTORM_DEF_SB_NUM_INDICES
; index
++)
4137 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4138 USTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4141 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4142 c_def_status_block
);
4143 def_sb
->c_def_status_block
.status_block_id
= sb_id
;
4147 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4148 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4149 REG_WR(bp
, BAR_CSTRORM_INTMEM
+
4150 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4152 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ DEF_CSB_FUNC_OFF
+
4153 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4154 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_HC_BTR_OFFSET(func
),
4157 for (index
= 0; index
< HC_CSTORM_DEF_SB_NUM_INDICES
; index
++)
4158 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4159 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4162 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4163 t_def_status_block
);
4164 def_sb
->t_def_status_block
.status_block_id
= sb_id
;
4168 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4169 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4170 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4171 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4173 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ DEF_TSB_FUNC_OFF
+
4174 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4175 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_HC_BTR_OFFSET(func
),
4178 for (index
= 0; index
< HC_TSTORM_DEF_SB_NUM_INDICES
; index
++)
4179 REG_WR16(bp
, BAR_TSTRORM_INTMEM
+
4180 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4183 section
= ((u64
)mapping
) + offsetof(struct host_def_status_block
,
4184 x_def_status_block
);
4185 def_sb
->x_def_status_block
.status_block_id
= sb_id
;
4189 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4190 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
), U64_LO(section
));
4191 REG_WR(bp
, BAR_XSTRORM_INTMEM
+
4192 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func
)) + 4),
4194 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ DEF_XSB_FUNC_OFF
+
4195 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func
), func
);
4196 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_HC_BTR_OFFSET(func
),
4199 for (index
= 0; index
< HC_XSTORM_DEF_SB_NUM_INDICES
; index
++)
4200 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+
4201 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func
, index
), 1);
4203 bp
->stats_pending
= 0;
4205 bnx2x_ack_sb(bp
, sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
4208 static void bnx2x_update_coalesce(struct bnx2x
*bp
)
4210 int port
= BP_PORT(bp
);
4213 for_each_queue(bp
, i
) {
4214 int sb_id
= bp
->fp
[i
].sb_id
;
4216 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4217 REG_WR8(bp
, BAR_USTRORM_INTMEM
+
4218 USTORM_SB_HC_TIMEOUT_OFFSET(port
, sb_id
,
4219 HC_INDEX_U_ETH_RX_CQ_CONS
),
4221 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4222 USTORM_SB_HC_DISABLE_OFFSET(port
, sb_id
,
4223 HC_INDEX_U_ETH_RX_CQ_CONS
),
4224 bp
->rx_ticks
? 0 : 1);
4226 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4227 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+
4228 CSTORM_SB_HC_TIMEOUT_OFFSET(port
, sb_id
,
4229 HC_INDEX_C_ETH_TX_CQ_CONS
),
4231 REG_WR16(bp
, BAR_CSTRORM_INTMEM
+
4232 CSTORM_SB_HC_DISABLE_OFFSET(port
, sb_id
,
4233 HC_INDEX_C_ETH_TX_CQ_CONS
),
4234 bp
->tx_ticks
? 0 : 1);
4238 static inline void bnx2x_free_tpa_pool(struct bnx2x
*bp
,
4239 struct bnx2x_fastpath
*fp
, int last
)
4243 for (i
= 0; i
< last
; i
++) {
4244 struct sw_rx_bd
*rx_buf
= &(fp
->tpa_pool
[i
]);
4245 struct sk_buff
*skb
= rx_buf
->skb
;
4248 DP(NETIF_MSG_IFDOWN
, "tpa bin %d empty on free\n", i
);
4252 if (fp
->tpa_state
[i
] == BNX2X_TPA_START
)
4253 pci_unmap_single(bp
->pdev
,
4254 pci_unmap_addr(rx_buf
, mapping
),
4255 bp
->rx_buf_use_size
,
4256 PCI_DMA_FROMDEVICE
);
4263 static void bnx2x_init_rx_rings(struct bnx2x
*bp
)
4265 int func
= BP_FUNC(bp
);
4266 u16 ring_prod
, cqe_ring_prod
= 0;
4269 bp
->rx_buf_use_size
= bp
->dev
->mtu
;
4270 bp
->rx_buf_use_size
+= bp
->rx_offset
+ ETH_OVREHEAD
;
4271 bp
->rx_buf_size
= bp
->rx_buf_use_size
+ 64;
4273 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4275 "rx_buf_use_size %d rx_buf_size %d effective_mtu %d\n",
4276 bp
->rx_buf_use_size
, bp
->rx_buf_size
,
4277 bp
->dev
->mtu
+ ETH_OVREHEAD
);
4279 for_each_queue(bp
, j
) {
4280 for (i
= 0; i
< ETH_MAX_AGGREGATION_QUEUES_E1H
; i
++) {
4281 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4283 fp
->tpa_pool
[i
].skb
=
4284 netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
4285 if (!fp
->tpa_pool
[i
].skb
) {
4286 BNX2X_ERR("Failed to allocate TPA "
4287 "skb pool for queue[%d] - "
4288 "disabling TPA on this "
4290 bnx2x_free_tpa_pool(bp
, fp
, i
);
4291 fp
->disable_tpa
= 1;
4294 pci_unmap_addr_set((struct sw_rx_bd
*)
4295 &bp
->fp
->tpa_pool
[i
],
4297 fp
->tpa_state
[i
] = BNX2X_TPA_STOP
;
4302 for_each_queue(bp
, j
) {
4303 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4306 fp
->rx_cons_sb
= BNX2X_RX_SB_INDEX
;
4307 fp
->rx_bd_cons_sb
= BNX2X_RX_SB_BD_INDEX
;
4309 /* "next page" elements initialization */
4311 for (i
= 1; i
<= NUM_RX_SGE_PAGES
; i
++) {
4312 struct eth_rx_sge
*sge
;
4314 sge
= &fp
->rx_sge_ring
[RX_SGE_CNT
* i
- 2];
4316 cpu_to_le32(U64_HI(fp
->rx_sge_mapping
+
4317 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4319 cpu_to_le32(U64_LO(fp
->rx_sge_mapping
+
4320 BCM_PAGE_SIZE
*(i
% NUM_RX_SGE_PAGES
)));
4323 bnx2x_init_sge_ring_bit_mask(fp
);
4326 for (i
= 1; i
<= NUM_RX_RINGS
; i
++) {
4327 struct eth_rx_bd
*rx_bd
;
4329 rx_bd
= &fp
->rx_desc_ring
[RX_DESC_CNT
* i
- 2];
4331 cpu_to_le32(U64_HI(fp
->rx_desc_mapping
+
4332 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4334 cpu_to_le32(U64_LO(fp
->rx_desc_mapping
+
4335 BCM_PAGE_SIZE
*(i
% NUM_RX_RINGS
)));
4339 for (i
= 1; i
<= NUM_RCQ_RINGS
; i
++) {
4340 struct eth_rx_cqe_next_page
*nextpg
;
4342 nextpg
= (struct eth_rx_cqe_next_page
*)
4343 &fp
->rx_comp_ring
[RCQ_DESC_CNT
* i
- 1];
4345 cpu_to_le32(U64_HI(fp
->rx_comp_mapping
+
4346 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4348 cpu_to_le32(U64_LO(fp
->rx_comp_mapping
+
4349 BCM_PAGE_SIZE
*(i
% NUM_RCQ_RINGS
)));
4352 /* Allocate SGEs and initialize the ring elements */
4353 for (i
= 0, ring_prod
= 0;
4354 i
< MAX_RX_SGE_CNT
*NUM_RX_SGE_PAGES
; i
++) {
4356 if (bnx2x_alloc_rx_sge(bp
, fp
, ring_prod
) < 0) {
4357 BNX2X_ERR("was only able to allocate "
4359 BNX2X_ERR("disabling TPA for queue[%d]\n", j
);
4360 /* Cleanup already allocated elements */
4361 bnx2x_free_rx_sge_range(bp
, fp
, ring_prod
);
4362 bnx2x_free_tpa_pool(bp
, fp
,
4363 ETH_MAX_AGGREGATION_QUEUES_E1H
);
4364 fp
->disable_tpa
= 1;
4368 ring_prod
= NEXT_SGE_IDX(ring_prod
);
4370 fp
->rx_sge_prod
= ring_prod
;
4372 /* Allocate BDs and initialize BD ring */
4373 fp
->rx_comp_cons
= fp
->rx_alloc_failed
= 0;
4374 cqe_ring_prod
= ring_prod
= 0;
4375 for (i
= 0; i
< bp
->rx_ring_size
; i
++) {
4376 if (bnx2x_alloc_rx_skb(bp
, fp
, ring_prod
) < 0) {
4377 BNX2X_ERR("was only able to allocate "
4379 fp
->rx_alloc_failed
++;
4382 ring_prod
= NEXT_RX_IDX(ring_prod
);
4383 cqe_ring_prod
= NEXT_RCQ_IDX(cqe_ring_prod
);
4384 WARN_ON(ring_prod
<= i
);
4387 fp
->rx_bd_prod
= ring_prod
;
4388 /* must not have more available CQEs than BDs */
4389 fp
->rx_comp_prod
= min((u16
)(NUM_RCQ_RINGS
*RCQ_DESC_CNT
),
4391 fp
->rx_pkt
= fp
->rx_calls
= 0;
4394 * this will generate an interrupt (to the TSTORM)
4395 * must only be done after chip is initialized
4397 bnx2x_update_rx_prod(bp
, fp
, ring_prod
, fp
->rx_comp_prod
,
4402 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4403 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
),
4404 U64_LO(fp
->rx_comp_mapping
));
4405 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4406 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func
) + 4,
4407 U64_HI(fp
->rx_comp_mapping
));
4411 static void bnx2x_init_tx_ring(struct bnx2x
*bp
)
4415 for_each_queue(bp
, j
) {
4416 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
4418 for (i
= 1; i
<= NUM_TX_RINGS
; i
++) {
4419 struct eth_tx_bd
*tx_bd
=
4420 &fp
->tx_desc_ring
[TX_DESC_CNT
* i
- 1];
4423 cpu_to_le32(U64_HI(fp
->tx_desc_mapping
+
4424 BCM_PAGE_SIZE
*(i
% NUM_TX_RINGS
)));
4426 cpu_to_le32(U64_LO(fp
->tx_desc_mapping
+
4427 BCM_PAGE_SIZE
*(i
% NUM_TX_RINGS
)));
4430 fp
->tx_pkt_prod
= 0;
4431 fp
->tx_pkt_cons
= 0;
4434 fp
->tx_cons_sb
= BNX2X_TX_SB_INDEX
;
4439 static void bnx2x_init_sp_ring(struct bnx2x
*bp
)
4441 int func
= BP_FUNC(bp
);
4443 spin_lock_init(&bp
->spq_lock
);
4445 bp
->spq_left
= MAX_SPQ_PENDING
;
4446 bp
->spq_prod_idx
= 0;
4447 bp
->dsb_sp_prod
= BNX2X_SP_DSB_INDEX
;
4448 bp
->spq_prod_bd
= bp
->spq
;
4449 bp
->spq_last_bd
= bp
->spq_prod_bd
+ MAX_SP_DESC_CNT
;
4451 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PAGE_BASE_OFFSET(func
),
4452 U64_LO(bp
->spq_mapping
));
4454 XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PAGE_BASE_OFFSET(func
) + 4,
4455 U64_HI(bp
->spq_mapping
));
4457 REG_WR(bp
, XSEM_REG_FAST_MEMORY
+ XSTORM_SPQ_PROD_OFFSET(func
),
4461 static void bnx2x_init_context(struct bnx2x
*bp
)
4465 for_each_queue(bp
, i
) {
4466 struct eth_context
*context
= bnx2x_sp(bp
, context
[i
].eth
);
4467 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4468 u8 sb_id
= FP_SB_ID(fp
);
4470 context
->xstorm_st_context
.tx_bd_page_base_hi
=
4471 U64_HI(fp
->tx_desc_mapping
);
4472 context
->xstorm_st_context
.tx_bd_page_base_lo
=
4473 U64_LO(fp
->tx_desc_mapping
);
4474 context
->xstorm_st_context
.db_data_addr_hi
=
4475 U64_HI(fp
->tx_prods_mapping
);
4476 context
->xstorm_st_context
.db_data_addr_lo
=
4477 U64_LO(fp
->tx_prods_mapping
);
4478 context
->xstorm_st_context
.statistics_data
= (BP_CL_ID(bp
) |
4479 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE
);
4481 context
->ustorm_st_context
.common
.sb_index_numbers
=
4482 BNX2X_RX_SB_INDEX_NUM
;
4483 context
->ustorm_st_context
.common
.clientId
= FP_CL_ID(fp
);
4484 context
->ustorm_st_context
.common
.status_block_id
= sb_id
;
4485 context
->ustorm_st_context
.common
.flags
=
4486 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT
;
4487 context
->ustorm_st_context
.common
.mc_alignment_size
= 64;
4488 context
->ustorm_st_context
.common
.bd_buff_size
=
4489 bp
->rx_buf_use_size
;
4490 context
->ustorm_st_context
.common
.bd_page_base_hi
=
4491 U64_HI(fp
->rx_desc_mapping
);
4492 context
->ustorm_st_context
.common
.bd_page_base_lo
=
4493 U64_LO(fp
->rx_desc_mapping
);
4494 if (!fp
->disable_tpa
) {
4495 context
->ustorm_st_context
.common
.flags
|=
4496 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA
|
4497 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING
);
4498 context
->ustorm_st_context
.common
.sge_buff_size
=
4499 (u16
)(BCM_PAGE_SIZE
*PAGES_PER_SGE
);
4500 context
->ustorm_st_context
.common
.sge_page_base_hi
=
4501 U64_HI(fp
->rx_sge_mapping
);
4502 context
->ustorm_st_context
.common
.sge_page_base_lo
=
4503 U64_LO(fp
->rx_sge_mapping
);
4506 context
->cstorm_st_context
.sb_index_number
=
4507 HC_INDEX_C_ETH_TX_CQ_CONS
;
4508 context
->cstorm_st_context
.status_block_id
= sb_id
;
4510 context
->xstorm_ag_context
.cdu_reserved
=
4511 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
4512 CDU_REGION_NUMBER_XCM_AG
,
4513 ETH_CONNECTION_TYPE
);
4514 context
->ustorm_ag_context
.cdu_usage
=
4515 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp
, i
),
4516 CDU_REGION_NUMBER_UCM_AG
,
4517 ETH_CONNECTION_TYPE
);
4521 static void bnx2x_init_ind_table(struct bnx2x
*bp
)
4523 int port
= BP_PORT(bp
);
4529 DP(NETIF_MSG_IFUP
, "Initializing indirection table\n");
4530 for (i
= 0; i
< TSTORM_INDIRECTION_TABLE_SIZE
; i
++)
4531 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+
4532 TSTORM_INDIRECTION_TABLE_OFFSET(port
) + i
,
4533 i
% bp
->num_queues
);
4535 REG_WR(bp
, PRS_REG_A_PRSU_20
, 0xf);
4538 static void bnx2x_set_client_config(struct bnx2x
*bp
)
4540 struct tstorm_eth_client_config tstorm_client
= {0};
4541 int port
= BP_PORT(bp
);
4544 tstorm_client
.mtu
= bp
->dev
->mtu
+ ETH_OVREHEAD
;
4545 tstorm_client
.statistics_counter_id
= 0;
4546 tstorm_client
.config_flags
=
4547 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE
;
4549 if (bp
->rx_mode
&& bp
->vlgrp
) {
4550 tstorm_client
.config_flags
|=
4551 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE
;
4552 DP(NETIF_MSG_IFUP
, "vlan removal enabled\n");
4556 if (bp
->flags
& TPA_ENABLE_FLAG
) {
4557 tstorm_client
.max_sges_for_packet
=
4558 BCM_PAGE_ALIGN(tstorm_client
.mtu
) >> BCM_PAGE_SHIFT
;
4559 tstorm_client
.max_sges_for_packet
=
4560 ((tstorm_client
.max_sges_for_packet
+
4561 PAGES_PER_SGE
- 1) & (~(PAGES_PER_SGE
- 1))) >>
4562 PAGES_PER_SGE_SHIFT
;
4564 tstorm_client
.config_flags
|=
4565 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING
;
4568 for_each_queue(bp
, i
) {
4569 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4570 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
),
4571 ((u32
*)&tstorm_client
)[0]);
4572 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4573 TSTORM_CLIENT_CONFIG_OFFSET(port
, bp
->fp
[i
].cl_id
) + 4,
4574 ((u32
*)&tstorm_client
)[1]);
4577 DP(BNX2X_MSG_OFF
, "tstorm_client: 0x%08x 0x%08x\n",
4578 ((u32
*)&tstorm_client
)[0], ((u32
*)&tstorm_client
)[1]);
4581 static void bnx2x_set_storm_rx_mode(struct bnx2x
*bp
)
4583 struct tstorm_eth_mac_filter_config tstorm_mac_filter
= {0};
4584 int mode
= bp
->rx_mode
;
4585 int mask
= (1 << BP_L_ID(bp
));
4586 int func
= BP_FUNC(bp
);
4589 DP(NETIF_MSG_RX_STATUS
, "rx mode is %d\n", mode
);
4592 case BNX2X_RX_MODE_NONE
: /* no Rx */
4593 tstorm_mac_filter
.ucast_drop_all
= mask
;
4594 tstorm_mac_filter
.mcast_drop_all
= mask
;
4595 tstorm_mac_filter
.bcast_drop_all
= mask
;
4597 case BNX2X_RX_MODE_NORMAL
:
4598 tstorm_mac_filter
.bcast_accept_all
= mask
;
4600 case BNX2X_RX_MODE_ALLMULTI
:
4601 tstorm_mac_filter
.mcast_accept_all
= mask
;
4602 tstorm_mac_filter
.bcast_accept_all
= mask
;
4604 case BNX2X_RX_MODE_PROMISC
:
4605 tstorm_mac_filter
.ucast_accept_all
= mask
;
4606 tstorm_mac_filter
.mcast_accept_all
= mask
;
4607 tstorm_mac_filter
.bcast_accept_all
= mask
;
4610 BNX2X_ERR("BAD rx mode (%d)\n", mode
);
4614 for (i
= 0; i
< sizeof(struct tstorm_eth_mac_filter_config
)/4; i
++) {
4615 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4616 TSTORM_MAC_FILTER_CONFIG_OFFSET(func
) + i
* 4,
4617 ((u32
*)&tstorm_mac_filter
)[i
]);
4619 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4620 ((u32 *)&tstorm_mac_filter)[i]); */
4623 if (mode
!= BNX2X_RX_MODE_NONE
)
4624 bnx2x_set_client_config(bp
);
4627 static void bnx2x_init_internal(struct bnx2x
*bp
)
4629 struct tstorm_eth_function_common_config tstorm_config
= {0};
4630 struct stats_indication_flags stats_flags
= {0};
4631 int port
= BP_PORT(bp
);
4632 int func
= BP_FUNC(bp
);
4636 tstorm_config
.config_flags
= MULTI_FLAGS
;
4637 tstorm_config
.rss_result_mask
= MULTI_MASK
;
4640 tstorm_config
.leading_client_id
= BP_L_ID(bp
);
4642 REG_WR(bp
, BAR_TSTRORM_INTMEM
+
4643 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func
),
4644 (*(u32
*)&tstorm_config
));
4646 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4647 (*(u32 *)&tstorm_config)); */
4649 bp
->rx_mode
= BNX2X_RX_MODE_NONE
; /* no rx until link is up */
4650 bnx2x_set_storm_rx_mode(bp
);
4652 stats_flags
.collect_eth
= 1;
4654 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(port
),
4655 ((u32
*)&stats_flags
)[0]);
4656 REG_WR(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_STATS_FLAGS_OFFSET(port
) + 4,
4657 ((u32
*)&stats_flags
)[1]);
4659 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(port
),
4660 ((u32
*)&stats_flags
)[0]);
4661 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_STATS_FLAGS_OFFSET(port
) + 4,
4662 ((u32
*)&stats_flags
)[1]);
4664 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(port
),
4665 ((u32
*)&stats_flags
)[0]);
4666 REG_WR(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_STATS_FLAGS_OFFSET(port
) + 4,
4667 ((u32
*)&stats_flags
)[1]);
4669 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
4670 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4672 if (CHIP_IS_E1H(bp
)) {
4673 REG_WR8(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_FUNCTION_MODE_OFFSET
,
4675 REG_WR8(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_FUNCTION_MODE_OFFSET
,
4677 REG_WR8(bp
, BAR_CSTRORM_INTMEM
+ CSTORM_FUNCTION_MODE_OFFSET
,
4679 REG_WR8(bp
, BAR_USTRORM_INTMEM
+ USTORM_FUNCTION_MODE_OFFSET
,
4682 REG_WR16(bp
, BAR_XSTRORM_INTMEM
+ XSTORM_E1HOV_OFFSET(func
),
4686 /* Zero this manualy as its initialization is
4687 currently missing in the initTool */
4688 for (i
= 0; i
< USTORM_AGG_DATA_SIZE
>> 2; i
++)
4689 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4690 USTORM_AGG_DATA_OFFSET
+ 4*i
, 0);
4692 for_each_queue(bp
, i
) {
4693 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4696 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4697 USTORM_CQE_PAGE_BASE_OFFSET(port
, FP_CL_ID(fp
)),
4698 U64_LO(fp
->rx_comp_mapping
));
4699 REG_WR(bp
, BAR_USTRORM_INTMEM
+
4700 USTORM_CQE_PAGE_BASE_OFFSET(port
, FP_CL_ID(fp
)) + 4,
4701 U64_HI(fp
->rx_comp_mapping
));
4703 max_agg_size
= min((u32
)(bp
->rx_buf_use_size
+
4704 8*BCM_PAGE_SIZE
*PAGES_PER_SGE
),
4706 REG_WR16(bp
, BAR_USTRORM_INTMEM
+
4707 USTORM_MAX_AGG_SIZE_OFFSET(port
, FP_CL_ID(fp
)),
4712 static void bnx2x_nic_init(struct bnx2x
*bp
)
4716 for_each_queue(bp
, i
) {
4717 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
4720 fp
->state
= BNX2X_FP_STATE_CLOSED
;
4722 fp
->cl_id
= BP_L_ID(bp
) + i
;
4723 fp
->sb_id
= fp
->cl_id
;
4725 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4726 bp
, fp
->status_blk
, i
, FP_CL_ID(fp
), FP_SB_ID(fp
));
4727 bnx2x_init_sb(bp
, FP_SB_ID(fp
), fp
->status_blk
,
4728 fp
->status_blk_mapping
);
4731 bnx2x_init_def_sb(bp
, bp
->def_status_blk
,
4732 bp
->def_status_blk_mapping
, DEF_SB_ID
);
4733 bnx2x_update_coalesce(bp
);
4734 bnx2x_init_rx_rings(bp
);
4735 bnx2x_init_tx_ring(bp
);
4736 bnx2x_init_sp_ring(bp
);
4737 bnx2x_init_context(bp
);
4738 bnx2x_init_internal(bp
);
4739 bnx2x_storm_stats_init(bp
);
4740 bnx2x_init_ind_table(bp
);
4741 bnx2x_int_enable(bp
);
4744 /* end of nic init */
4747 * gzip service functions
4750 static int bnx2x_gunzip_init(struct bnx2x
*bp
)
4752 bp
->gunzip_buf
= pci_alloc_consistent(bp
->pdev
, FW_BUF_SIZE
,
4753 &bp
->gunzip_mapping
);
4754 if (bp
->gunzip_buf
== NULL
)
4757 bp
->strm
= kmalloc(sizeof(*bp
->strm
), GFP_KERNEL
);
4758 if (bp
->strm
== NULL
)
4761 bp
->strm
->workspace
= kmalloc(zlib_inflate_workspacesize(),
4763 if (bp
->strm
->workspace
== NULL
)
4773 pci_free_consistent(bp
->pdev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
4774 bp
->gunzip_mapping
);
4775 bp
->gunzip_buf
= NULL
;
4778 printk(KERN_ERR PFX
"%s: Cannot allocate firmware buffer for"
4779 " un-compression\n", bp
->dev
->name
);
4783 static void bnx2x_gunzip_end(struct bnx2x
*bp
)
4785 kfree(bp
->strm
->workspace
);
4790 if (bp
->gunzip_buf
) {
4791 pci_free_consistent(bp
->pdev
, FW_BUF_SIZE
, bp
->gunzip_buf
,
4792 bp
->gunzip_mapping
);
4793 bp
->gunzip_buf
= NULL
;
4797 static int bnx2x_gunzip(struct bnx2x
*bp
, u8
*zbuf
, int len
)
4801 /* check gzip header */
4802 if ((zbuf
[0] != 0x1f) || (zbuf
[1] != 0x8b) || (zbuf
[2] != Z_DEFLATED
))
4809 if (zbuf
[3] & FNAME
)
4810 while ((zbuf
[n
++] != 0) && (n
< len
));
4812 bp
->strm
->next_in
= zbuf
+ n
;
4813 bp
->strm
->avail_in
= len
- n
;
4814 bp
->strm
->next_out
= bp
->gunzip_buf
;
4815 bp
->strm
->avail_out
= FW_BUF_SIZE
;
4817 rc
= zlib_inflateInit2(bp
->strm
, -MAX_WBITS
);
4821 rc
= zlib_inflate(bp
->strm
, Z_FINISH
);
4822 if ((rc
!= Z_OK
) && (rc
!= Z_STREAM_END
))
4823 printk(KERN_ERR PFX
"%s: Firmware decompression error: %s\n",
4824 bp
->dev
->name
, bp
->strm
->msg
);
4826 bp
->gunzip_outlen
= (FW_BUF_SIZE
- bp
->strm
->avail_out
);
4827 if (bp
->gunzip_outlen
& 0x3)
4828 printk(KERN_ERR PFX
"%s: Firmware decompression error:"
4829 " gunzip_outlen (%d) not aligned\n",
4830 bp
->dev
->name
, bp
->gunzip_outlen
);
4831 bp
->gunzip_outlen
>>= 2;
4833 zlib_inflateEnd(bp
->strm
);
4835 if (rc
== Z_STREAM_END
)
4841 /* nic load/unload */
4844 * General service functions
4847 /* send a NIG loopback debug packet */
4848 static void bnx2x_lb_pckt(struct bnx2x
*bp
)
4852 /* Ethernet source and destination addresses */
4853 wb_write
[0] = 0x55555555;
4854 wb_write
[1] = 0x55555555;
4855 wb_write
[2] = 0x20; /* SOP */
4856 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
4858 /* NON-IP protocol */
4859 wb_write
[0] = 0x09000000;
4860 wb_write
[1] = 0x55555555;
4861 wb_write
[2] = 0x10; /* EOP, eop_bvalid = 0 */
4862 REG_WR_DMAE(bp
, NIG_REG_DEBUG_PACKET_LB
, wb_write
, 3);
4865 /* some of the internal memories
4866 * are not directly readable from the driver
4867 * to test them we send debug packets
4869 static int bnx2x_int_mem_test(struct bnx2x
*bp
)
4875 if (CHIP_REV_IS_FPGA(bp
))
4877 else if (CHIP_REV_IS_EMUL(bp
))
4882 DP(NETIF_MSG_HW
, "start part1\n");
4884 /* Disable inputs of parser neighbor blocks */
4885 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4886 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4887 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4888 NIG_WR(NIG_REG_PRS_REQ_IN_EN
, 0x0);
4890 /* Write 0 to parser credits for CFC search request */
4891 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4893 /* send Ethernet packet */
4896 /* TODO do i reset NIG statistic? */
4897 /* Wait until NIG register shows 1 packet of size 0x10 */
4898 count
= 1000 * factor
;
4901 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4902 val
= *bnx2x_sp(bp
, wb_data
[0]);
4910 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4914 /* Wait until PRS register shows 1 packet */
4915 count
= 1000 * factor
;
4917 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4925 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4929 /* Reset and init BRB, PRS */
4930 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4932 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4934 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
4935 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
4937 DP(NETIF_MSG_HW
, "part2\n");
4939 /* Disable inputs of parser neighbor blocks */
4940 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x0);
4941 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x0);
4942 REG_WR(bp
, CFC_REG_DEBUG0
, 0x1);
4943 NIG_WR(NIG_REG_PRS_REQ_IN_EN
, 0x0);
4945 /* Write 0 to parser credits for CFC search request */
4946 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x0);
4948 /* send 10 Ethernet packets */
4949 for (i
= 0; i
< 10; i
++)
4952 /* Wait until NIG register shows 10 + 1
4953 packets of size 11*0x10 = 0xb0 */
4954 count
= 1000 * factor
;
4957 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
4958 val
= *bnx2x_sp(bp
, wb_data
[0]);
4966 BNX2X_ERR("NIG timeout val = 0x%x\n", val
);
4970 /* Wait until PRS register shows 2 packets */
4971 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4973 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4975 /* Write 1 to parser credits for CFC search request */
4976 REG_WR(bp
, PRS_REG_CFC_SEARCH_INITIAL_CREDIT
, 0x1);
4978 /* Wait until PRS register shows 3 packets */
4979 msleep(10 * factor
);
4980 /* Wait until NIG register shows 1 packet of size 0x10 */
4981 val
= REG_RD(bp
, PRS_REG_NUM_OF_PACKETS
);
4983 BNX2X_ERR("PRS timeout val = 0x%x\n", val
);
4985 /* clear NIG EOP FIFO */
4986 for (i
= 0; i
< 11; i
++)
4987 REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_FIFO
);
4988 val
= REG_RD(bp
, NIG_REG_INGRESS_EOP_LB_EMPTY
);
4990 BNX2X_ERR("clear of NIG failed\n");
4994 /* Reset and init BRB, PRS, NIG */
4995 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
, 0x03);
4997 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0x03);
4999 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
5000 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
5003 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5006 /* Enable inputs of parser neighbor blocks */
5007 REG_WR(bp
, TSDM_REG_ENABLE_IN1
, 0x7fffffff);
5008 REG_WR(bp
, TCM_REG_PRS_IFEN
, 0x1);
5009 REG_WR(bp
, CFC_REG_DEBUG0
, 0x0);
5010 NIG_WR(NIG_REG_PRS_REQ_IN_EN
, 0x1);
5012 DP(NETIF_MSG_HW
, "done\n");
5017 static void enable_blocks_attention(struct bnx2x
*bp
)
5019 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5020 REG_WR(bp
, PXP_REG_PXP_INT_MASK_1
, 0);
5021 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5022 REG_WR(bp
, CFC_REG_CFC_INT_MASK
, 0);
5023 REG_WR(bp
, QM_REG_QM_INT_MASK
, 0);
5024 REG_WR(bp
, TM_REG_TM_INT_MASK
, 0);
5025 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_0
, 0);
5026 REG_WR(bp
, XSDM_REG_XSDM_INT_MASK_1
, 0);
5027 REG_WR(bp
, XCM_REG_XCM_INT_MASK
, 0);
5028 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5029 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5030 REG_WR(bp
, USDM_REG_USDM_INT_MASK_0
, 0);
5031 REG_WR(bp
, USDM_REG_USDM_INT_MASK_1
, 0);
5032 REG_WR(bp
, UCM_REG_UCM_INT_MASK
, 0);
5033 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5034 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5035 REG_WR(bp
, GRCBASE_UPB
+ PB_REG_PB_INT_MASK
, 0);
5036 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_0
, 0);
5037 REG_WR(bp
, CSDM_REG_CSDM_INT_MASK_1
, 0);
5038 REG_WR(bp
, CCM_REG_CCM_INT_MASK
, 0);
5039 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5040 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5041 if (CHIP_REV_IS_FPGA(bp
))
5042 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x580000);
5044 REG_WR(bp
, PXP2_REG_PXP2_INT_MASK_0
, 0x480000);
5045 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_0
, 0);
5046 REG_WR(bp
, TSDM_REG_TSDM_INT_MASK_1
, 0);
5047 REG_WR(bp
, TCM_REG_TCM_INT_MASK
, 0);
5048 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5049 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5050 REG_WR(bp
, CDU_REG_CDU_INT_MASK
, 0);
5051 REG_WR(bp
, DMAE_REG_DMAE_INT_MASK
, 0);
5052 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5053 REG_WR(bp
, PBF_REG_PBF_INT_MASK
, 0X18); /* bit 3,4 masked */
5057 static int bnx2x_init_common(struct bnx2x
*bp
)
5061 DP(BNX2X_MSG_MCP
, "starting common init func %d\n", BP_FUNC(bp
));
5063 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
, 0xffffffff);
5064 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_SET
, 0xfffc);
5066 bnx2x_init_block(bp
, MISC_COMMON_START
, MISC_COMMON_END
);
5067 if (CHIP_IS_E1H(bp
))
5068 REG_WR(bp
, MISC_REG_E1HMF_MODE
, IS_E1HMF(bp
));
5070 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x100);
5072 REG_WR(bp
, MISC_REG_LCPLL_CTRL_REG_2
, 0x0);
5074 bnx2x_init_block(bp
, PXP_COMMON_START
, PXP_COMMON_END
);
5075 if (CHIP_IS_E1(bp
)) {
5076 /* enable HW interrupt from PXP on USDM overflow
5077 bit 16 on INT_MASK_0 */
5078 REG_WR(bp
, PXP_REG_PXP_INT_MASK_0
, 0);
5081 bnx2x_init_block(bp
, PXP2_COMMON_START
, PXP2_COMMON_END
);
5085 REG_WR(bp
, PXP2_REG_RQ_QM_ENDIAN_M
, 1);
5086 REG_WR(bp
, PXP2_REG_RQ_TM_ENDIAN_M
, 1);
5087 REG_WR(bp
, PXP2_REG_RQ_SRC_ENDIAN_M
, 1);
5088 REG_WR(bp
, PXP2_REG_RQ_CDU_ENDIAN_M
, 1);
5089 REG_WR(bp
, PXP2_REG_RQ_DBG_ENDIAN_M
, 1);
5090 REG_WR(bp
, PXP2_REG_RQ_HC_ENDIAN_M
, 1);
5092 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5093 REG_WR(bp
, PXP2_REG_RD_QM_SWAP_MODE
, 1);
5094 REG_WR(bp
, PXP2_REG_RD_TM_SWAP_MODE
, 1);
5095 REG_WR(bp
, PXP2_REG_RD_SRC_SWAP_MODE
, 1);
5096 REG_WR(bp
, PXP2_REG_RD_CDURD_SWAP_MODE
, 1);
5101 REG_WR(bp
, PRS_REG_NIC_MODE
, 1);
5104 REG_WR(bp
, PXP2_REG_RQ_CDU_P_SIZE
, 2);
5106 REG_WR(bp
, PXP2_REG_RQ_TM_P_SIZE
, 5);
5107 REG_WR(bp
, PXP2_REG_RQ_QM_P_SIZE
, 5);
5108 REG_WR(bp
, PXP2_REG_RQ_SRC_P_SIZE
, 5);
5111 if (CHIP_REV_IS_FPGA(bp
) && CHIP_IS_E1H(bp
))
5112 REG_WR(bp
, PXP2_REG_PGL_TAGS_LIMIT
, 0x1);
5114 /* let the HW do it's magic ... */
5116 /* finish PXP init */
5117 val
= REG_RD(bp
, PXP2_REG_RQ_CFG_DONE
);
5119 BNX2X_ERR("PXP2 CFG failed\n");
5122 val
= REG_RD(bp
, PXP2_REG_RD_INIT_DONE
);
5124 BNX2X_ERR("PXP2 RD_INIT failed\n");
5128 REG_WR(bp
, PXP2_REG_RQ_DISABLE_INPUTS
, 0);
5129 REG_WR(bp
, PXP2_REG_RD_DISABLE_INPUTS
, 0);
5131 bnx2x_init_block(bp
, DMAE_COMMON_START
, DMAE_COMMON_END
);
5133 /* clean the DMAE memory */
5135 bnx2x_init_fill(bp
, TSEM_REG_PRAM
, 0, 8);
5137 bnx2x_init_block(bp
, TCM_COMMON_START
, TCM_COMMON_END
);
5138 bnx2x_init_block(bp
, UCM_COMMON_START
, UCM_COMMON_END
);
5139 bnx2x_init_block(bp
, CCM_COMMON_START
, CCM_COMMON_END
);
5140 bnx2x_init_block(bp
, XCM_COMMON_START
, XCM_COMMON_END
);
5142 bnx2x_read_dmae(bp
, XSEM_REG_PASSIVE_BUFFER
, 3);
5143 bnx2x_read_dmae(bp
, CSEM_REG_PASSIVE_BUFFER
, 3);
5144 bnx2x_read_dmae(bp
, TSEM_REG_PASSIVE_BUFFER
, 3);
5145 bnx2x_read_dmae(bp
, USEM_REG_PASSIVE_BUFFER
, 3);
5147 bnx2x_init_block(bp
, QM_COMMON_START
, QM_COMMON_END
);
5148 /* soft reset pulse */
5149 REG_WR(bp
, QM_REG_SOFT_RESET
, 1);
5150 REG_WR(bp
, QM_REG_SOFT_RESET
, 0);
5153 bnx2x_init_block(bp
, TIMERS_COMMON_START
, TIMERS_COMMON_END
);
5156 bnx2x_init_block(bp
, DQ_COMMON_START
, DQ_COMMON_END
);
5157 REG_WR(bp
, DORQ_REG_DPM_CID_OFST
, BCM_PAGE_SHIFT
);
5158 if (!CHIP_REV_IS_SLOW(bp
)) {
5159 /* enable hw interrupt from doorbell Q */
5160 REG_WR(bp
, DORQ_REG_DORQ_INT_MASK
, 0);
5163 bnx2x_init_block(bp
, BRB1_COMMON_START
, BRB1_COMMON_END
);
5164 if (CHIP_REV_IS_SLOW(bp
)) {
5165 /* fix for emulation and FPGA for no pause */
5166 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_0
, 513);
5167 REG_WR(bp
, BRB1_REG_PAUSE_HIGH_THRESHOLD_1
, 513);
5168 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_0
, 0);
5169 REG_WR(bp
, BRB1_REG_PAUSE_LOW_THRESHOLD_1
, 0);
5172 bnx2x_init_block(bp
, PRS_COMMON_START
, PRS_COMMON_END
);
5173 if (CHIP_IS_E1H(bp
))
5174 REG_WR(bp
, PRS_REG_E1HOV_MODE
, IS_E1HMF(bp
));
5176 bnx2x_init_block(bp
, TSDM_COMMON_START
, TSDM_COMMON_END
);
5177 bnx2x_init_block(bp
, CSDM_COMMON_START
, CSDM_COMMON_END
);
5178 bnx2x_init_block(bp
, USDM_COMMON_START
, USDM_COMMON_END
);
5179 bnx2x_init_block(bp
, XSDM_COMMON_START
, XSDM_COMMON_END
);
5181 if (CHIP_IS_E1H(bp
)) {
5182 bnx2x_init_fill(bp
, TSTORM_INTMEM_ADDR
, 0,
5183 STORM_INTMEM_SIZE_E1H
/2);
5185 TSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5186 0, STORM_INTMEM_SIZE_E1H
/2);
5187 bnx2x_init_fill(bp
, CSTORM_INTMEM_ADDR
, 0,
5188 STORM_INTMEM_SIZE_E1H
/2);
5190 CSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5191 0, STORM_INTMEM_SIZE_E1H
/2);
5192 bnx2x_init_fill(bp
, XSTORM_INTMEM_ADDR
, 0,
5193 STORM_INTMEM_SIZE_E1H
/2);
5195 XSTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5196 0, STORM_INTMEM_SIZE_E1H
/2);
5197 bnx2x_init_fill(bp
, USTORM_INTMEM_ADDR
, 0,
5198 STORM_INTMEM_SIZE_E1H
/2);
5200 USTORM_INTMEM_ADDR
+ STORM_INTMEM_SIZE_E1H
/2,
5201 0, STORM_INTMEM_SIZE_E1H
/2);
5203 bnx2x_init_fill(bp
, TSTORM_INTMEM_ADDR
, 0,
5204 STORM_INTMEM_SIZE_E1
);
5205 bnx2x_init_fill(bp
, CSTORM_INTMEM_ADDR
, 0,
5206 STORM_INTMEM_SIZE_E1
);
5207 bnx2x_init_fill(bp
, XSTORM_INTMEM_ADDR
, 0,
5208 STORM_INTMEM_SIZE_E1
);
5209 bnx2x_init_fill(bp
, USTORM_INTMEM_ADDR
, 0,
5210 STORM_INTMEM_SIZE_E1
);
5213 bnx2x_init_block(bp
, TSEM_COMMON_START
, TSEM_COMMON_END
);
5214 bnx2x_init_block(bp
, USEM_COMMON_START
, USEM_COMMON_END
);
5215 bnx2x_init_block(bp
, CSEM_COMMON_START
, CSEM_COMMON_END
);
5216 bnx2x_init_block(bp
, XSEM_COMMON_START
, XSEM_COMMON_END
);
5219 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
5221 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_SET
,
5224 bnx2x_init_block(bp
, UPB_COMMON_START
, UPB_COMMON_END
);
5225 bnx2x_init_block(bp
, XPB_COMMON_START
, XPB_COMMON_END
);
5226 bnx2x_init_block(bp
, PBF_COMMON_START
, PBF_COMMON_END
);
5228 REG_WR(bp
, SRC_REG_SOFT_RST
, 1);
5229 for (i
= SRC_REG_KEYRSS0_0
; i
<= SRC_REG_KEYRSS1_9
; i
+= 4) {
5230 REG_WR(bp
, i
, 0xc0cac01a);
5231 /* TODO: replace with something meaningful */
5233 if (CHIP_IS_E1H(bp
))
5234 bnx2x_init_block(bp
, SRCH_COMMON_START
, SRCH_COMMON_END
);
5235 REG_WR(bp
, SRC_REG_SOFT_RST
, 0);
5237 if (sizeof(union cdu_context
) != 1024)
5238 /* we currently assume that a context is 1024 bytes */
5239 printk(KERN_ALERT PFX
"please adjust the size of"
5240 " cdu_context(%ld)\n", (long)sizeof(union cdu_context
));
5242 bnx2x_init_block(bp
, CDU_COMMON_START
, CDU_COMMON_END
);
5243 val
= (4 << 24) + (0 << 12) + 1024;
5244 REG_WR(bp
, CDU_REG_CDU_GLOBAL_PARAMS
, val
);
5245 if (CHIP_IS_E1(bp
)) {
5246 /* !!! fix pxp client crdit until excel update */
5247 REG_WR(bp
, CDU_REG_CDU_DEBUG
, 0x264);
5248 REG_WR(bp
, CDU_REG_CDU_DEBUG
, 0);
5251 bnx2x_init_block(bp
, CFC_COMMON_START
, CFC_COMMON_END
);
5252 REG_WR(bp
, CFC_REG_INIT_REG
, 0x7FF);
5254 bnx2x_init_block(bp
, HC_COMMON_START
, HC_COMMON_END
);
5255 bnx2x_init_block(bp
, MISC_AEU_COMMON_START
, MISC_AEU_COMMON_END
);
5257 /* PXPCS COMMON comes here */
5258 /* Reset PCIE errors for debug */
5259 REG_WR(bp
, 0x2814, 0xffffffff);
5260 REG_WR(bp
, 0x3820, 0xffffffff);
5262 /* EMAC0 COMMON comes here */
5263 /* EMAC1 COMMON comes here */
5264 /* DBU COMMON comes here */
5265 /* DBG COMMON comes here */
5267 bnx2x_init_block(bp
, NIG_COMMON_START
, NIG_COMMON_END
);
5268 if (CHIP_IS_E1H(bp
)) {
5269 REG_WR(bp
, NIG_REG_LLH_MF_MODE
, IS_E1HMF(bp
));
5270 REG_WR(bp
, NIG_REG_LLH_E1HOV_MODE
, IS_E1HMF(bp
));
5273 if (CHIP_REV_IS_SLOW(bp
))
5276 /* finish CFC init */
5277 val
= reg_poll(bp
, CFC_REG_LL_INIT_DONE
, 1, 100, 10);
5279 BNX2X_ERR("CFC LL_INIT failed\n");
5282 val
= reg_poll(bp
, CFC_REG_AC_INIT_DONE
, 1, 100, 10);
5284 BNX2X_ERR("CFC AC_INIT failed\n");
5287 val
= reg_poll(bp
, CFC_REG_CAM_INIT_DONE
, 1, 100, 10);
5289 BNX2X_ERR("CFC CAM_INIT failed\n");
5292 REG_WR(bp
, CFC_REG_DEBUG0
, 0);
5294 /* read NIG statistic
5295 to see if this is our first up since powerup */
5296 bnx2x_read_dmae(bp
, NIG_REG_STAT2_BRB_OCTET
, 2);
5297 val
= *bnx2x_sp(bp
, wb_data
[0]);
5299 /* do internal memory self test */
5300 if ((CHIP_IS_E1(bp
)) && (val
== 0) && bnx2x_int_mem_test(bp
)) {
5301 BNX2X_ERR("internal mem self test failed\n");
5305 switch (bp
->common
.board
& SHARED_HW_CFG_BOARD_TYPE_MASK
) {
5306 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G
:
5307 /* Fan failure is indicated by SPIO 5 */
5308 bnx2x_set_spio(bp
, MISC_REGISTERS_SPIO_5
,
5309 MISC_REGISTERS_SPIO_INPUT_HI_Z
);
5311 /* set to active low mode */
5312 val
= REG_RD(bp
, MISC_REG_SPIO_INT
);
5313 val
|= ((1 << MISC_REGISTERS_SPIO_5
) <<
5314 MISC_REGISTERS_SPIO_INT_OLD_SET_POS
);
5315 REG_WR(bp
, MISC_REG_SPIO_INT
, val
);
5317 /* enable interrupt to signal the IGU */
5318 val
= REG_RD(bp
, MISC_REG_SPIO_EVENT_EN
);
5319 val
|= (1 << MISC_REGISTERS_SPIO_5
);
5320 REG_WR(bp
, MISC_REG_SPIO_EVENT_EN
, val
);
5327 /* clear PXP2 attentions */
5328 REG_RD(bp
, PXP2_REG_PXP2_INT_STS_CLR_0
);
5330 enable_blocks_attention(bp
);
5332 if (bp
->flags
& TPA_ENABLE_FLAG
) {
5333 struct tstorm_eth_tpa_exist tmp
= {0};
5337 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_TPA_EXIST_OFFSET
,
5339 REG_WR(bp
, BAR_TSTRORM_INTMEM
+ TSTORM_TPA_EXIST_OFFSET
+ 4,
5346 static int bnx2x_init_port(struct bnx2x
*bp
)
5348 int port
= BP_PORT(bp
);
5351 DP(BNX2X_MSG_MCP
, "starting port init port %x\n", port
);
5353 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
5355 /* Port PXP comes here */
5356 /* Port PXP2 comes here */
5361 wb_write
[0] = ONCHIP_ADDR1(bp
->timers_mapping
);
5362 wb_write
[1] = ONCHIP_ADDR2(bp
->timers_mapping
);
5363 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5364 REG_WR(bp
, PXP2_REG_PSWRQ_TM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5369 wb_write
[0] = ONCHIP_ADDR1(bp
->qm_mapping
);
5370 wb_write
[1] = ONCHIP_ADDR2(bp
->qm_mapping
);
5371 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5372 REG_WR(bp
, PXP2_REG_PSWRQ_QM0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5377 wb_write
[0] = ONCHIP_ADDR1(bp
->t1_mapping
);
5378 wb_write
[1] = ONCHIP_ADDR2(bp
->t1_mapping
);
5379 REG_WR_DMAE(bp
, PXP2_REG_RQ_ONCHIP_AT
+ i
*8, wb_write
, 2);
5380 REG_WR(bp
, PXP2_REG_PSWRQ_SRC0_L2P
+ func
*4, PXP_ONE_ILT(i
));
5382 /* Port CMs come here */
5384 /* Port QM comes here */
5386 REG_WR(bp
, TM_REG_LIN0_SCAN_TIME
+ func
*4, 1024/64*20);
5387 REG_WR(bp
, TM_REG_LIN0_MAX_ACTIVE_CID
+ func
*4, 31);
5389 bnx2x_init_block(bp
, func
? TIMERS_PORT1_START
: TIMERS_PORT0_START
,
5390 func
? TIMERS_PORT1_END
: TIMERS_PORT0_END
);
5392 /* Port DQ comes here */
5393 /* Port BRB1 comes here */
5394 /* Port PRS comes here */
5395 /* Port TSDM comes here */
5396 /* Port CSDM comes here */
5397 /* Port USDM comes here */
5398 /* Port XSDM comes here */
5399 bnx2x_init_block(bp
, port
? TSEM_PORT1_START
: TSEM_PORT0_START
,
5400 port
? TSEM_PORT1_END
: TSEM_PORT0_END
);
5401 bnx2x_init_block(bp
, port
? USEM_PORT1_START
: USEM_PORT0_START
,
5402 port
? USEM_PORT1_END
: USEM_PORT0_END
);
5403 bnx2x_init_block(bp
, port
? CSEM_PORT1_START
: CSEM_PORT0_START
,
5404 port
? CSEM_PORT1_END
: CSEM_PORT0_END
);
5405 bnx2x_init_block(bp
, port
? XSEM_PORT1_START
: XSEM_PORT0_START
,
5406 port
? XSEM_PORT1_END
: XSEM_PORT0_END
);
5407 /* Port UPB comes here */
5408 /* Port XPB comes here */
5410 bnx2x_init_block(bp
, port
? PBF_PORT1_START
: PBF_PORT0_START
,
5411 port
? PBF_PORT1_END
: PBF_PORT0_END
);
5413 /* configure PBF to work without PAUSE mtu 9000 */
5414 REG_WR(bp
, PBF_REG_P0_PAUSE_ENABLE
+ port
*4, 0);
5416 /* update threshold */
5417 REG_WR(bp
, PBF_REG_P0_ARB_THRSH
+ port
*4, (9040/16));
5418 /* update init credit */
5419 REG_WR(bp
, PBF_REG_P0_INIT_CRD
+ port
*4, (9040/16) + 553 - 22);
5422 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 1);
5424 REG_WR(bp
, PBF_REG_INIT_P0
+ port
*4, 0);
5427 /* tell the searcher where the T2 table is */
5428 REG_WR(bp
, SRC_REG_COUNTFREE0
+ func
*4, 16*1024/64);
5430 wb_write
[0] = U64_LO(bp
->t2_mapping
);
5431 wb_write
[1] = U64_HI(bp
->t2_mapping
);
5432 REG_WR_DMAE(bp
, SRC_REG_FIRSTFREE0
+ func
*4, wb_write
, 2);
5433 wb_write
[0] = U64_LO((u64
)bp
->t2_mapping
+ 16*1024 - 64);
5434 wb_write
[1] = U64_HI((u64
)bp
->t2_mapping
+ 16*1024 - 64);
5435 REG_WR_DMAE(bp
, SRC_REG_LASTFREE0
+ func
*4, wb_write
, 2);
5437 REG_WR(bp
, SRC_REG_NUMBER_HASH_BITS0
+ func
*4, 10);
5438 /* Port SRCH comes here */
5440 /* Port CDU comes here */
5441 /* Port CFC comes here */
5443 if (CHIP_IS_E1(bp
)) {
5444 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5445 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5447 bnx2x_init_block(bp
, port
? HC_PORT1_START
: HC_PORT0_START
,
5448 port
? HC_PORT1_END
: HC_PORT0_END
);
5450 bnx2x_init_block(bp
, port
? MISC_AEU_PORT1_START
:
5451 MISC_AEU_PORT0_START
,
5452 port
? MISC_AEU_PORT1_END
: MISC_AEU_PORT0_END
);
5453 /* init aeu_mask_attn_func_0/1:
5454 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5455 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5456 * bits 4-7 are used for "per vn group attention" */
5457 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4,
5458 (IS_E1HMF(bp
) ? 0xF7 : 0x7));
5460 /* Port PXPCS comes here */
5461 /* Port EMAC0 comes here */
5462 /* Port EMAC1 comes here */
5463 /* Port DBU comes here */
5464 /* Port DBG comes here */
5465 bnx2x_init_block(bp
, port
? NIG_PORT1_START
: NIG_PORT0_START
,
5466 port
? NIG_PORT1_END
: NIG_PORT0_END
);
5468 REG_WR(bp
, NIG_REG_XGXS_SERDES0_MODE_SEL
+ port
*4, 1);
5470 if (CHIP_IS_E1H(bp
)) {
5472 struct cmng_struct_per_port m_cmng_port
;
5475 /* 0x2 disable e1hov, 0x1 enable */
5476 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK_MF
+ port
*4,
5477 (IS_E1HMF(bp
) ? 0x1 : 0x2));
5479 /* Init RATE SHAPING and FAIRNESS contexts.
5480 Initialize as if there is 10G link. */
5481 wsum
= bnx2x_calc_vn_wsum(bp
);
5482 bnx2x_init_port_minmax(bp
, (int)wsum
, 10000, &m_cmng_port
);
5484 for (vn
= VN_0
; vn
< E1HVN_MAX
; vn
++)
5485 bnx2x_init_vn_minmax(bp
, 2*vn
+ port
,
5486 wsum
, 10000, &m_cmng_port
);
5489 /* Port MCP comes here */
5490 /* Port DMAE comes here */
5492 switch (bp
->common
.board
& SHARED_HW_CFG_BOARD_TYPE_MASK
) {
5493 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G
:
5494 /* add SPIO 5 to group 0 */
5495 val
= REG_RD(bp
, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
);
5496 val
|= AEU_INPUTS_ATTN_BITS_SPIO5
;
5497 REG_WR(bp
, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0
, val
);
5504 bnx2x__link_reset(bp
);
5509 #define ILT_PER_FUNC (768/2)
5510 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5511 /* the phys address is shifted right 12 bits and has an added
5512 1=valid bit added to the 53rd bit
5513 then since this is a wide register(TM)
5514 we split it into two 32 bit writes
5516 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5517 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5518 #define PXP_ONE_ILT(x) (((x) << 10) | x)
5519 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5521 #define CNIC_ILT_LINES 0
5523 static void bnx2x_ilt_wr(struct bnx2x
*bp
, u32 index
, dma_addr_t addr
)
5527 if (CHIP_IS_E1H(bp
))
5528 reg
= PXP2_REG_RQ_ONCHIP_AT_B0
+ index
*8;
5530 reg
= PXP2_REG_RQ_ONCHIP_AT
+ index
*8;
5532 bnx2x_wb_wr(bp
, reg
, ONCHIP_ADDR1(addr
), ONCHIP_ADDR2(addr
));
5535 static int bnx2x_init_func(struct bnx2x
*bp
)
5537 int port
= BP_PORT(bp
);
5538 int func
= BP_FUNC(bp
);
5541 DP(BNX2X_MSG_MCP
, "starting func init func %x\n", func
);
5543 i
= FUNC_ILT_BASE(func
);
5545 bnx2x_ilt_wr(bp
, i
, bnx2x_sp_mapping(bp
, context
));
5546 if (CHIP_IS_E1H(bp
)) {
5547 REG_WR(bp
, PXP2_REG_RQ_CDU_FIRST_ILT
, i
);
5548 REG_WR(bp
, PXP2_REG_RQ_CDU_LAST_ILT
, i
+ CNIC_ILT_LINES
);
5550 REG_WR(bp
, PXP2_REG_PSWRQ_CDU0_L2P
+ func
*4,
5551 PXP_ILT_RANGE(i
, i
+ CNIC_ILT_LINES
));
5554 if (CHIP_IS_E1H(bp
)) {
5555 for (i
= 0; i
< 9; i
++)
5556 bnx2x_init_block(bp
,
5557 cm_start
[func
][i
], cm_end
[func
][i
]);
5559 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ port
*8, 1);
5560 REG_WR(bp
, NIG_REG_LLH0_FUNC_VLAN_ID
+ port
*8, bp
->e1hov
);
5563 /* HC init per function */
5564 if (CHIP_IS_E1H(bp
)) {
5565 REG_WR(bp
, MISC_REG_AEU_GENERAL_ATTN_12
+ func
*4, 0);
5567 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
5568 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
5570 bnx2x_init_block(bp
, hc_limits
[func
][0], hc_limits
[func
][1]);
5572 if (CHIP_IS_E1H(bp
))
5573 REG_WR(bp
, HC_REG_FUNC_NUM_P0
+ port
*4, func
);
5575 /* Reset PCIE errors for debug */
5576 REG_WR(bp
, 0x2114, 0xffffffff);
5577 REG_WR(bp
, 0x2120, 0xffffffff);
5582 static int bnx2x_init_hw(struct bnx2x
*bp
, u32 load_code
)
5586 DP(BNX2X_MSG_MCP
, "function %d load_code %x\n",
5587 BP_FUNC(bp
), load_code
);
5590 mutex_init(&bp
->dmae_mutex
);
5591 bnx2x_gunzip_init(bp
);
5593 switch (load_code
) {
5594 case FW_MSG_CODE_DRV_LOAD_COMMON
:
5595 rc
= bnx2x_init_common(bp
);
5600 case FW_MSG_CODE_DRV_LOAD_PORT
:
5602 rc
= bnx2x_init_port(bp
);
5607 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
5609 rc
= bnx2x_init_func(bp
);
5615 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code
);
5619 if (!BP_NOMCP(bp
)) {
5620 int func
= BP_FUNC(bp
);
5622 bp
->fw_drv_pulse_wr_seq
=
5623 (SHMEM_RD(bp
, func_mb
[func
].drv_pulse_mb
) &
5624 DRV_PULSE_SEQ_MASK
);
5625 bp
->func_stx
= SHMEM_RD(bp
, func_mb
[func
].fw_mb_param
);
5626 DP(BNX2X_MSG_MCP
, "drv_pulse 0x%x func_stx 0x%x\n",
5627 bp
->fw_drv_pulse_wr_seq
, bp
->func_stx
);
5631 /* this needs to be done before gunzip end */
5632 bnx2x_zero_def_sb(bp
);
5633 for_each_queue(bp
, i
)
5634 bnx2x_zero_sb(bp
, BP_L_ID(bp
) + i
);
5637 bnx2x_gunzip_end(bp
);
5642 /* send the MCP a request, block until there is a reply */
5643 static u32
bnx2x_fw_command(struct bnx2x
*bp
, u32 command
)
5645 int func
= BP_FUNC(bp
);
5646 u32 seq
= ++bp
->fw_seq
;
5649 u8 delay
= CHIP_REV_IS_SLOW(bp
) ? 100 : 10;
5651 SHMEM_WR(bp
, func_mb
[func
].drv_mb_header
, (command
| seq
));
5652 DP(BNX2X_MSG_MCP
, "wrote command (%x) to FW MB\n", (command
| seq
));
5655 /* let the FW do it's magic ... */
5658 rc
= SHMEM_RD(bp
, func_mb
[func
].fw_mb_header
);
5660 /* Give the FW up to 2 second (200*10ms) */
5661 } while ((seq
!= (rc
& FW_MSG_SEQ_NUMBER_MASK
)) && (cnt
++ < 200));
5663 DP(BNX2X_MSG_MCP
, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5664 cnt
*delay
, rc
, seq
);
5666 /* is this a reply to our command? */
5667 if (seq
== (rc
& FW_MSG_SEQ_NUMBER_MASK
)) {
5668 rc
&= FW_MSG_CODE_MASK
;
5672 BNX2X_ERR("FW failed to respond!\n");
5680 static void bnx2x_free_mem(struct bnx2x
*bp
)
5683 #define BNX2X_PCI_FREE(x, y, size) \
5686 pci_free_consistent(bp->pdev, size, x, y); \
5692 #define BNX2X_FREE(x) \
5703 for_each_queue(bp
, i
) {
5706 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, status_blk
),
5707 bnx2x_fp(bp
, i
, status_blk_mapping
),
5708 sizeof(struct host_status_block
) +
5709 sizeof(struct eth_tx_db_data
));
5711 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5712 BNX2X_FREE(bnx2x_fp(bp
, i
, tx_buf_ring
));
5713 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, tx_desc_ring
),
5714 bnx2x_fp(bp
, i
, tx_desc_mapping
),
5715 sizeof(struct eth_tx_bd
) * NUM_TX_BD
);
5717 BNX2X_FREE(bnx2x_fp(bp
, i
, rx_buf_ring
));
5718 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_desc_ring
),
5719 bnx2x_fp(bp
, i
, rx_desc_mapping
),
5720 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5722 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_comp_ring
),
5723 bnx2x_fp(bp
, i
, rx_comp_mapping
),
5724 sizeof(struct eth_fast_path_rx_cqe
) *
5728 BNX2X_PCI_FREE(bnx2x_fp(bp
, i
, rx_sge_ring
),
5729 bnx2x_fp(bp
, i
, rx_sge_mapping
),
5730 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5732 /* end of fastpath */
5734 BNX2X_PCI_FREE(bp
->def_status_blk
, bp
->def_status_blk_mapping
,
5735 sizeof(struct host_def_status_block
));
5737 BNX2X_PCI_FREE(bp
->slowpath
, bp
->slowpath_mapping
,
5738 sizeof(struct bnx2x_slowpath
));
5741 BNX2X_PCI_FREE(bp
->t1
, bp
->t1_mapping
, 64*1024);
5742 BNX2X_PCI_FREE(bp
->t2
, bp
->t2_mapping
, 16*1024);
5743 BNX2X_PCI_FREE(bp
->timers
, bp
->timers_mapping
, 8*1024);
5744 BNX2X_PCI_FREE(bp
->qm
, bp
->qm_mapping
, 128*1024);
5746 BNX2X_PCI_FREE(bp
->spq
, bp
->spq_mapping
, BCM_PAGE_SIZE
);
5748 #undef BNX2X_PCI_FREE
5752 static int bnx2x_alloc_mem(struct bnx2x
*bp
)
5755 #define BNX2X_PCI_ALLOC(x, y, size) \
5757 x = pci_alloc_consistent(bp->pdev, size, y); \
5759 goto alloc_mem_err; \
5760 memset(x, 0, size); \
5763 #define BNX2X_ALLOC(x, size) \
5765 x = vmalloc(size); \
5767 goto alloc_mem_err; \
5768 memset(x, 0, size); \
5774 for_each_queue(bp
, i
) {
5775 bnx2x_fp(bp
, i
, bp
) = bp
;
5778 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, status_blk
),
5779 &bnx2x_fp(bp
, i
, status_blk_mapping
),
5780 sizeof(struct host_status_block
) +
5781 sizeof(struct eth_tx_db_data
));
5783 bnx2x_fp(bp
, i
, hw_tx_prods
) =
5784 (void *)(bnx2x_fp(bp
, i
, status_blk
) + 1);
5786 bnx2x_fp(bp
, i
, tx_prods_mapping
) =
5787 bnx2x_fp(bp
, i
, status_blk_mapping
) +
5788 sizeof(struct host_status_block
);
5790 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5791 BNX2X_ALLOC(bnx2x_fp(bp
, i
, tx_buf_ring
),
5792 sizeof(struct sw_tx_bd
) * NUM_TX_BD
);
5793 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, tx_desc_ring
),
5794 &bnx2x_fp(bp
, i
, tx_desc_mapping
),
5795 sizeof(struct eth_tx_bd
) * NUM_TX_BD
);
5797 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_buf_ring
),
5798 sizeof(struct sw_rx_bd
) * NUM_RX_BD
);
5799 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_desc_ring
),
5800 &bnx2x_fp(bp
, i
, rx_desc_mapping
),
5801 sizeof(struct eth_rx_bd
) * NUM_RX_BD
);
5803 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_comp_ring
),
5804 &bnx2x_fp(bp
, i
, rx_comp_mapping
),
5805 sizeof(struct eth_fast_path_rx_cqe
) *
5809 BNX2X_ALLOC(bnx2x_fp(bp
, i
, rx_page_ring
),
5810 sizeof(struct sw_rx_page
) * NUM_RX_SGE
);
5811 BNX2X_PCI_ALLOC(bnx2x_fp(bp
, i
, rx_sge_ring
),
5812 &bnx2x_fp(bp
, i
, rx_sge_mapping
),
5813 BCM_PAGE_SIZE
* NUM_RX_SGE_PAGES
);
5815 /* end of fastpath */
5817 BNX2X_PCI_ALLOC(bp
->def_status_blk
, &bp
->def_status_blk_mapping
,
5818 sizeof(struct host_def_status_block
));
5820 BNX2X_PCI_ALLOC(bp
->slowpath
, &bp
->slowpath_mapping
,
5821 sizeof(struct bnx2x_slowpath
));
5824 BNX2X_PCI_ALLOC(bp
->t1
, &bp
->t1_mapping
, 64*1024);
5827 for (i
= 0; i
< 64*1024; i
+= 64) {
5828 *(u64
*)((char *)bp
->t1
+ i
+ 56) = 0x0UL
;
5829 *(u64
*)((char *)bp
->t1
+ i
+ 3) = 0x0UL
;
5832 /* allocate searcher T2 table
5833 we allocate 1/4 of alloc num for T2
5834 (which is not entered into the ILT) */
5835 BNX2X_PCI_ALLOC(bp
->t2
, &bp
->t2_mapping
, 16*1024);
5838 for (i
= 0; i
< 16*1024; i
+= 64)
5839 * (u64
*)((char *)bp
->t2
+ i
+ 56) = bp
->t2_mapping
+ i
+ 64;
5841 /* now fixup the last line in the block to point to the next block */
5842 *(u64
*)((char *)bp
->t2
+ 1024*16-8) = bp
->t2_mapping
;
5844 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5845 BNX2X_PCI_ALLOC(bp
->timers
, &bp
->timers_mapping
, 8*1024);
5847 /* QM queues (128*MAX_CONN) */
5848 BNX2X_PCI_ALLOC(bp
->qm
, &bp
->qm_mapping
, 128*1024);
5851 /* Slow path ring */
5852 BNX2X_PCI_ALLOC(bp
->spq
, &bp
->spq_mapping
, BCM_PAGE_SIZE
);
5860 #undef BNX2X_PCI_ALLOC
5864 static void bnx2x_free_tx_skbs(struct bnx2x
*bp
)
5868 for_each_queue(bp
, i
) {
5869 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
5871 u16 bd_cons
= fp
->tx_bd_cons
;
5872 u16 sw_prod
= fp
->tx_pkt_prod
;
5873 u16 sw_cons
= fp
->tx_pkt_cons
;
5875 while (sw_cons
!= sw_prod
) {
5876 bd_cons
= bnx2x_free_tx_pkt(bp
, fp
, TX_BD(sw_cons
));
5882 static void bnx2x_free_rx_skbs(struct bnx2x
*bp
)
5886 for_each_queue(bp
, j
) {
5887 struct bnx2x_fastpath
*fp
= &bp
->fp
[j
];
5889 for (i
= 0; i
< NUM_RX_BD
; i
++) {
5890 struct sw_rx_bd
*rx_buf
= &fp
->rx_buf_ring
[i
];
5891 struct sk_buff
*skb
= rx_buf
->skb
;
5896 pci_unmap_single(bp
->pdev
,
5897 pci_unmap_addr(rx_buf
, mapping
),
5898 bp
->rx_buf_use_size
,
5899 PCI_DMA_FROMDEVICE
);
5904 if (!fp
->disable_tpa
)
5905 bnx2x_free_tpa_pool(bp
, fp
,
5906 ETH_MAX_AGGREGATION_QUEUES_E1H
);
/* Free all driver-owned skbs, Tx then Rx. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
5916 static void bnx2x_free_msix_irqs(struct bnx2x
*bp
)
5920 free_irq(bp
->msix_table
[0].vector
, bp
->dev
);
5921 DP(NETIF_MSG_IFDOWN
, "released sp irq (%d)\n",
5922 bp
->msix_table
[0].vector
);
5924 for_each_queue(bp
, i
) {
5925 DP(NETIF_MSG_IFDOWN
, "about to release fp #%d->%d irq "
5926 "state %x\n", i
, bp
->msix_table
[i
+ offset
].vector
,
5927 bnx2x_fp(bp
, i
, state
));
5929 if (bnx2x_fp(bp
, i
, state
) != BNX2X_FP_STATE_CLOSED
)
5930 BNX2X_ERR("IRQ of fp #%d being freed while "
5931 "state != closed\n", i
);
5933 free_irq(bp
->msix_table
[i
+ offset
].vector
, &bp
->fp
[i
]);
5937 static void bnx2x_free_irq(struct bnx2x
*bp
)
5939 if (bp
->flags
& USING_MSIX_FLAG
) {
5940 bnx2x_free_msix_irqs(bp
);
5941 pci_disable_msix(bp
->pdev
);
5942 bp
->flags
&= ~USING_MSIX_FLAG
;
5945 free_irq(bp
->pdev
->irq
, bp
->dev
);
5948 static int bnx2x_enable_msix(struct bnx2x
*bp
)
5952 bp
->msix_table
[0].entry
= 0;
5954 DP(NETIF_MSG_IFUP
, "msix_table[0].entry = 0 (slowpath)\n");
5956 for_each_queue(bp
, i
) {
5957 int igu_vec
= offset
+ i
+ BP_L_ID(bp
);
5959 bp
->msix_table
[i
+ offset
].entry
= igu_vec
;
5960 DP(NETIF_MSG_IFUP
, "msix_table[%d].entry = %d "
5961 "(fastpath #%u)\n", i
+ offset
, igu_vec
, i
);
5964 rc
= pci_enable_msix(bp
->pdev
, &bp
->msix_table
[0],
5965 bp
->num_queues
+ offset
);
5967 DP(NETIF_MSG_IFUP
, "MSI-X is not attainable\n");
5970 bp
->flags
|= USING_MSIX_FLAG
;
5975 static int bnx2x_req_msix_irqs(struct bnx2x
*bp
)
5977 int i
, rc
, offset
= 1;
5979 rc
= request_irq(bp
->msix_table
[0].vector
, bnx2x_msix_sp_int
, 0,
5980 bp
->dev
->name
, bp
->dev
);
5982 BNX2X_ERR("request sp irq failed\n");
5986 for_each_queue(bp
, i
) {
5987 rc
= request_irq(bp
->msix_table
[i
+ offset
].vector
,
5988 bnx2x_msix_fp_int
, 0,
5989 bp
->dev
->name
, &bp
->fp
[i
]);
5991 BNX2X_ERR("request fp #%d irq failed rc %d\n",
5993 bnx2x_free_msix_irqs(bp
);
5997 bnx2x_fp(bp
, i
, state
) = BNX2X_FP_STATE_IRQ
;
6003 static int bnx2x_req_irq(struct bnx2x
*bp
)
6007 rc
= request_irq(bp
->pdev
->irq
, bnx2x_interrupt
, IRQF_SHARED
,
6008 bp
->dev
->name
, bp
->dev
);
6010 bnx2x_fp(bp
, 0, state
) = BNX2X_FP_STATE_IRQ
;
6016 * Init service functions
6019 static void bnx2x_set_mac_addr_e1(struct bnx2x
*bp
)
6021 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
6022 int port
= BP_PORT(bp
);
6025 * unicasts 0-31:port0 32-63:port1
6026 * multicast 64-127:port0 128-191:port1
6028 config
->hdr
.length_6b
= 2;
6029 config
->hdr
.offset
= port
? 31 : 0;
6030 config
->hdr
.client_id
= BP_CL_ID(bp
);
6031 config
->hdr
.reserved1
= 0;
6034 config
->config_table
[0].cam_entry
.msb_mac_addr
=
6035 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6036 config
->config_table
[0].cam_entry
.middle_mac_addr
=
6037 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6038 config
->config_table
[0].cam_entry
.lsb_mac_addr
=
6039 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6040 config
->config_table
[0].cam_entry
.flags
= cpu_to_le16(port
);
6041 config
->config_table
[0].target_table_entry
.flags
= 0;
6042 config
->config_table
[0].target_table_entry
.client_id
= 0;
6043 config
->config_table
[0].target_table_entry
.vlan_id
= 0;
6045 DP(NETIF_MSG_IFUP
, "setting MAC (%04x:%04x:%04x)\n",
6046 config
->config_table
[0].cam_entry
.msb_mac_addr
,
6047 config
->config_table
[0].cam_entry
.middle_mac_addr
,
6048 config
->config_table
[0].cam_entry
.lsb_mac_addr
);
6051 config
->config_table
[1].cam_entry
.msb_mac_addr
= 0xffff;
6052 config
->config_table
[1].cam_entry
.middle_mac_addr
= 0xffff;
6053 config
->config_table
[1].cam_entry
.lsb_mac_addr
= 0xffff;
6054 config
->config_table
[1].cam_entry
.flags
= cpu_to_le16(port
);
6055 config
->config_table
[1].target_table_entry
.flags
=
6056 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST
;
6057 config
->config_table
[1].target_table_entry
.client_id
= 0;
6058 config
->config_table
[1].target_table_entry
.vlan_id
= 0;
6060 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6061 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6062 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6065 static void bnx2x_set_mac_addr_e1h(struct bnx2x
*bp
)
6067 struct mac_configuration_cmd_e1h
*config
=
6068 (struct mac_configuration_cmd_e1h
*)bnx2x_sp(bp
, mac_config
);
6070 if (bp
->state
!= BNX2X_STATE_OPEN
) {
6071 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
6075 /* CAM allocation for E1H
6076 * unicasts: by func number
6077 * multicast: 20+FUNC*20, 20 each
6079 config
->hdr
.length_6b
= 1;
6080 config
->hdr
.offset
= BP_FUNC(bp
);
6081 config
->hdr
.client_id
= BP_CL_ID(bp
);
6082 config
->hdr
.reserved1
= 0;
6085 config
->config_table
[0].msb_mac_addr
=
6086 swab16(*(u16
*)&bp
->dev
->dev_addr
[0]);
6087 config
->config_table
[0].middle_mac_addr
=
6088 swab16(*(u16
*)&bp
->dev
->dev_addr
[2]);
6089 config
->config_table
[0].lsb_mac_addr
=
6090 swab16(*(u16
*)&bp
->dev
->dev_addr
[4]);
6091 config
->config_table
[0].client_id
= BP_L_ID(bp
);
6092 config
->config_table
[0].vlan_id
= 0;
6093 config
->config_table
[0].e1hov_id
= cpu_to_le16(bp
->e1hov
);
6094 config
->config_table
[0].flags
= BP_PORT(bp
);
6096 DP(NETIF_MSG_IFUP
, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6097 config
->config_table
[0].msb_mac_addr
,
6098 config
->config_table
[0].middle_mac_addr
,
6099 config
->config_table
[0].lsb_mac_addr
, bp
->e1hov
, BP_L_ID(bp
));
6101 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
6102 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
6103 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
6106 static int bnx2x_wait_ramrod(struct bnx2x
*bp
, int state
, int idx
,
6107 int *state_p
, int poll
)
6109 /* can take a while if any port is running */
6112 DP(NETIF_MSG_IFUP
, "%s for state to become %x on IDX [%d]\n",
6113 poll
? "polling" : "waiting", state
, idx
);
6118 bnx2x_rx_int(bp
->fp
, 10);
6119 /* if index is different from 0
6120 * the reply for some commands will
6121 * be on the none default queue
6124 bnx2x_rx_int(&bp
->fp
[idx
], 10);
6126 mb(); /* state is changed by bnx2x_sp_event() */
6128 if (*state_p
== state
)
6135 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6136 poll
? "polling" : "waiting", state
, idx
);
6137 #ifdef BNX2X_STOP_ON_ERROR
6144 static int bnx2x_setup_leading(struct bnx2x
*bp
)
6148 /* reset IGU state */
6149 bnx2x_ack_sb(bp
, bp
->fp
[0].sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
6152 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_SETUP
, 0, 0, 0, 0);
6154 /* Wait for completion */
6155 rc
= bnx2x_wait_ramrod(bp
, BNX2X_STATE_OPEN
, 0, &(bp
->state
), 0);
6160 static int bnx2x_setup_multi(struct bnx2x
*bp
, int index
)
6162 /* reset IGU state */
6163 bnx2x_ack_sb(bp
, bp
->fp
[index
].sb_id
, CSTORM_ID
, 0, IGU_INT_ENABLE
, 0);
6166 bp
->fp
[index
].state
= BNX2X_FP_STATE_OPENING
;
6167 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CLIENT_SETUP
, index
, 0, index
, 0);
6169 /* Wait for completion */
6170 return bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_OPEN
, index
,
6171 &(bp
->fp
[index
].state
), 0);
6174 static int bnx2x_poll(struct napi_struct
*napi
, int budget
);
6175 static void bnx2x_set_rx_mode(struct net_device
*dev
);
6177 /* must be called with rtnl_lock */
6178 static int bnx2x_nic_load(struct bnx2x
*bp
, int load_mode
)
6183 #ifdef BNX2X_STOP_ON_ERROR
6184 if (unlikely(bp
->panic
))
6188 bp
->state
= BNX2X_STATE_OPENING_WAIT4_LOAD
;
6190 /* Send LOAD_REQUEST command to MCP
6191 Returns the type of LOAD command:
6192 if it is the first port to be initialized
6193 common blocks should be initialized, otherwise - not
6195 if (!BP_NOMCP(bp
)) {
6196 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_REQ
);
6198 BNX2X_ERR("MCP response failure, unloading\n");
6201 if (load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED
)
6202 return -EBUSY
; /* other port in diagnostic mode */
6205 DP(NETIF_MSG_IFUP
, "NO MCP load counts before us %d, %d, %d\n",
6206 load_count
[0], load_count
[1], load_count
[2]);
6208 load_count
[1 + BP_PORT(bp
)]++;
6209 DP(NETIF_MSG_IFUP
, "NO MCP new load counts %d, %d, %d\n",
6210 load_count
[0], load_count
[1], load_count
[2]);
6211 if (load_count
[0] == 1)
6212 load_code
= FW_MSG_CODE_DRV_LOAD_COMMON
;
6213 else if (load_count
[1 + BP_PORT(bp
)] == 1)
6214 load_code
= FW_MSG_CODE_DRV_LOAD_PORT
;
6216 load_code
= FW_MSG_CODE_DRV_LOAD_FUNCTION
;
6219 if ((load_code
== FW_MSG_CODE_DRV_LOAD_COMMON
) ||
6220 (load_code
== FW_MSG_CODE_DRV_LOAD_PORT
))
6224 DP(NETIF_MSG_LINK
, "pmf %d\n", bp
->port
.pmf
);
6226 /* if we can't use MSI-X we only need one fp,
6227 * so try to enable MSI-X with the requested number of fp's
6228 * and fallback to inta with one fp
6234 if ((use_multi
> 1) && (use_multi
<= BP_MAX_QUEUES(bp
)))
6235 /* user requested number */
6236 bp
->num_queues
= use_multi
;
6239 bp
->num_queues
= min_t(u32
, num_online_cpus(),
6244 if (bnx2x_enable_msix(bp
)) {
6245 /* failed to enable MSI-X */
6248 BNX2X_ERR("Multi requested but failed"
6249 " to enable MSI-X\n");
6253 "set number of queues to %d\n", bp
->num_queues
);
6255 if (bnx2x_alloc_mem(bp
))
6258 for_each_queue(bp
, i
)
6259 bnx2x_fp(bp
, i
, disable_tpa
) =
6260 ((bp
->flags
& TPA_ENABLE_FLAG
) == 0);
6262 /* Disable interrupt handling until HW is initialized */
6263 atomic_set(&bp
->intr_sem
, 1);
6265 if (bp
->flags
& USING_MSIX_FLAG
) {
6266 rc
= bnx2x_req_msix_irqs(bp
);
6268 pci_disable_msix(bp
->pdev
);
6273 rc
= bnx2x_req_irq(bp
);
6275 BNX2X_ERR("IRQ request failed, aborting\n");
6280 for_each_queue(bp
, i
)
6281 netif_napi_add(bp
->dev
, &bnx2x_fp(bp
, i
, napi
),
6285 rc
= bnx2x_init_hw(bp
, load_code
);
6287 BNX2X_ERR("HW init failed, aborting\n");
6291 /* Enable interrupt handling */
6292 atomic_set(&bp
->intr_sem
, 0);
6294 /* Setup NIC internals and enable interrupts */
6297 /* Send LOAD_DONE command to MCP */
6298 if (!BP_NOMCP(bp
)) {
6299 load_code
= bnx2x_fw_command(bp
, DRV_MSG_CODE_LOAD_DONE
);
6301 BNX2X_ERR("MCP response failure, unloading\n");
6303 goto load_int_disable
;
6307 bnx2x_stats_init(bp
);
6309 bp
->state
= BNX2X_STATE_OPENING_WAIT4_PORT
;
6311 /* Enable Rx interrupt handling before sending the ramrod
6312 as it's completed on Rx FP queue */
6313 for_each_queue(bp
, i
)
6314 napi_enable(&bnx2x_fp(bp
, i
, napi
));
6316 rc
= bnx2x_setup_leading(bp
);
6318 #ifdef BNX2X_STOP_ON_ERROR
6321 goto load_stop_netif
;
6324 if (CHIP_IS_E1H(bp
))
6325 if (bp
->mf_config
& FUNC_MF_CFG_FUNC_DISABLED
) {
6326 BNX2X_ERR("!!! mf_cfg function disabled\n");
6327 bp
->state
= BNX2X_STATE_DISABLED
;
6330 if (bp
->state
== BNX2X_STATE_OPEN
)
6331 for_each_nondefault_queue(bp
, i
) {
6332 rc
= bnx2x_setup_multi(bp
, i
);
6334 goto load_stop_netif
;
6338 bnx2x_set_mac_addr_e1(bp
);
6340 bnx2x_set_mac_addr_e1h(bp
);
6343 bnx2x_initial_phy_init(bp
);
6345 /* Start fast path */
6346 switch (load_mode
) {
6348 /* Tx queue should be only reenabled */
6349 netif_wake_queue(bp
->dev
);
6350 bnx2x_set_rx_mode(bp
->dev
);
6354 /* IRQ is only requested from bnx2x_open */
6355 netif_start_queue(bp
->dev
);
6356 bnx2x_set_rx_mode(bp
->dev
);
6357 if (bp
->flags
& USING_MSIX_FLAG
)
6358 printk(KERN_INFO PFX
"%s: using MSI-X\n",
6363 bnx2x_set_rx_mode(bp
->dev
);
6364 bp
->state
= BNX2X_STATE_DIAG
;
6372 bnx2x__link_status_update(bp
);
6374 /* start the timer */
6375 mod_timer(&bp
->timer
, jiffies
+ bp
->current_interval
);
6381 for_each_queue(bp
, i
)
6382 napi_disable(&bnx2x_fp(bp
, i
, napi
));
6385 bnx2x_int_disable_sync(bp
);
6390 /* Free SKBs, SGEs, TPA pool and driver internals */
6391 bnx2x_free_skbs(bp
);
6392 for_each_queue(bp
, i
)
6393 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
,
6394 RX_SGE_CNT
*NUM_RX_SGE_PAGES
);
6398 /* TBD we really need to reset the chip
6399 if we want to recover from this */
6403 static int bnx2x_stop_multi(struct bnx2x
*bp
, int index
)
6407 /* halt the connection */
6408 bp
->fp
[index
].state
= BNX2X_FP_STATE_HALTING
;
6409 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, index
, 0, 0, 0);
6411 /* Wait for completion */
6412 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, index
,
6413 &(bp
->fp
[index
].state
), 1);
6414 if (rc
) /* timeout */
6417 /* delete cfc entry */
6418 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_CFC_DEL
, index
, 0, 0, 1);
6420 /* Wait for completion */
6421 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_CLOSED
, index
,
6422 &(bp
->fp
[index
].state
), 1);
6426 static void bnx2x_stop_leading(struct bnx2x
*bp
)
6428 u16 dsb_sp_prod_idx
;
6429 /* if the other port is handling traffic,
6430 this can take a lot of time */
6436 /* Send HALT ramrod */
6437 bp
->fp
[0].state
= BNX2X_FP_STATE_HALTING
;
6438 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_HALT
, 0, 0, BP_CL_ID(bp
), 0);
6440 /* Wait for completion */
6441 rc
= bnx2x_wait_ramrod(bp
, BNX2X_FP_STATE_HALTED
, 0,
6442 &(bp
->fp
[0].state
), 1);
6443 if (rc
) /* timeout */
6446 dsb_sp_prod_idx
= *bp
->dsb_sp_prod
;
6448 /* Send PORT_DELETE ramrod */
6449 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_PORT_DEL
, 0, 0, 0, 1);
6451 /* Wait for completion to arrive on default status block
6452 we are going to reset the chip anyway
6453 so there is not much to do if this times out
6455 while (dsb_sp_prod_idx
== *bp
->dsb_sp_prod
) {
6458 DP(NETIF_MSG_IFDOWN
, "timeout waiting for port del "
6459 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6460 *bp
->dsb_sp_prod
, dsb_sp_prod_idx
);
6461 #ifdef BNX2X_STOP_ON_ERROR
6468 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
;
6469 bp
->fp
[0].state
= BNX2X_FP_STATE_CLOSED
;
6472 static void bnx2x_reset_func(struct bnx2x
*bp
)
6474 int port
= BP_PORT(bp
);
6475 int func
= BP_FUNC(bp
);
6479 REG_WR(bp
, HC_REG_LEADING_EDGE_0
+ port
*8, 0);
6480 REG_WR(bp
, HC_REG_TRAILING_EDGE_0
+ port
*8, 0);
6482 REG_WR(bp
, HC_REG_CONFIG_0
+ port
*4, 0x1000);
6485 base
= FUNC_ILT_BASE(func
);
6486 for (i
= base
; i
< base
+ ILT_PER_FUNC
; i
++)
6487 bnx2x_ilt_wr(bp
, i
, 0);
6490 static void bnx2x_reset_port(struct bnx2x
*bp
)
6492 int port
= BP_PORT(bp
);
6495 REG_WR(bp
, NIG_REG_MASK_INTERRUPT_PORT0
+ port
*4, 0);
6497 /* Do not rcv packets to BRB */
6498 REG_WR(bp
, NIG_REG_LLH0_BRB1_DRV_MASK
+ port
*4, 0x0);
6499 /* Do not direct rcv packets that are not for MCP to the BRB */
6500 REG_WR(bp
, (port
? NIG_REG_LLH1_BRB1_NOT_MCP
:
6501 NIG_REG_LLH0_BRB1_NOT_MCP
), 0x0);
6504 REG_WR(bp
, MISC_REG_AEU_MASK_ATTN_FUNC_0
+ port
*4, 0);
6507 /* Check for BRB port occupancy */
6508 val
= REG_RD(bp
, BRB1_REG_PORT_NUM_OCC_BLOCKS_0
+ port
*4);
6510 DP(NETIF_MSG_IFDOWN
,
6511 "BRB1 is not empty %d blooks are occupied\n", val
);
6513 /* TODO: Close Doorbell port? */
6516 static void bnx2x_reset_common(struct bnx2x
*bp
)
6519 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
6521 REG_WR(bp
, GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
, 0x1403);
6524 static void bnx2x_reset_chip(struct bnx2x
*bp
, u32 reset_code
)
6526 DP(BNX2X_MSG_MCP
, "function %d reset_code %x\n",
6527 BP_FUNC(bp
), reset_code
);
6529 switch (reset_code
) {
6530 case FW_MSG_CODE_DRV_UNLOAD_COMMON
:
6531 bnx2x_reset_port(bp
);
6532 bnx2x_reset_func(bp
);
6533 bnx2x_reset_common(bp
);
6536 case FW_MSG_CODE_DRV_UNLOAD_PORT
:
6537 bnx2x_reset_port(bp
);
6538 bnx2x_reset_func(bp
);
6541 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION
:
6542 bnx2x_reset_func(bp
);
6546 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code
);
6551 /* msut be called with rtnl_lock */
6552 static int bnx2x_nic_unload(struct bnx2x
*bp
, int unload_mode
)
6557 bp
->state
= BNX2X_STATE_CLOSING_WAIT4_HALT
;
6559 bp
->rx_mode
= BNX2X_RX_MODE_NONE
;
6560 bnx2x_set_storm_rx_mode(bp
);
6562 if (netif_running(bp
->dev
)) {
6563 netif_tx_disable(bp
->dev
);
6564 bp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
6567 del_timer_sync(&bp
->timer
);
6568 SHMEM_WR(bp
, func_mb
[BP_FUNC(bp
)].drv_pulse_mb
,
6569 (DRV_PULSE_ALWAYS_ALIVE
| bp
->fw_drv_pulse_wr_seq
));
6570 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
6572 /* Wait until all fast path tasks complete */
6573 for_each_queue(bp
, i
) {
6574 struct bnx2x_fastpath
*fp
= &bp
->fp
[i
];
6576 #ifdef BNX2X_STOP_ON_ERROR
6577 #ifdef __powerpc64__
6578 DP(NETIF_MSG_RX_STATUS
, "fp->tpa_queue_used = 0x%lx\n",
6580 DP(NETIF_MSG_IFDOWN
, "fp->tpa_queue_used = 0x%llx\n",
6582 fp
->tpa_queue_used
);
6586 while (bnx2x_has_work(fp
)) {
6589 BNX2X_ERR("timeout waiting for queue[%d]\n",
6591 #ifdef BNX2X_STOP_ON_ERROR
6603 /* Wait until all slow path tasks complete */
6605 while ((bp
->spq_left
!= MAX_SPQ_PENDING
) && cnt
--)
6608 for_each_queue(bp
, i
)
6609 napi_disable(&bnx2x_fp(bp
, i
, napi
));
6610 /* Disable interrupts after Tx and Rx are disabled on stack level */
6611 bnx2x_int_disable_sync(bp
);
6616 if (bp
->flags
& NO_WOL_FLAG
)
6617 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
;
6620 u32 emac_base
= BP_PORT(bp
) ? GRCBASE_EMAC1
: GRCBASE_EMAC0
;
6621 u8
*mac_addr
= bp
->dev
->dev_addr
;
6624 /* The mac address is written to entries 1-4 to
6625 preserve entry 0 which is used by the PMF */
6626 val
= (mac_addr
[0] << 8) | mac_addr
[1];
6627 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH
+ (BP_E1HVN(bp
) + 1)*8, val
);
6629 val
= (mac_addr
[2] << 24) | (mac_addr
[3] << 16) |
6630 (mac_addr
[4] << 8) | mac_addr
[5];
6631 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH
+ (BP_E1HVN(bp
) + 1)*8 + 4,
6634 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
;
6637 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6639 /* Close multi and leading connections
6640 Completions for ramrods are collected in a synchronous way */
6641 for_each_nondefault_queue(bp
, i
)
6642 if (bnx2x_stop_multi(bp
, i
))
6645 if (CHIP_IS_E1H(bp
))
6646 REG_WR(bp
, NIG_REG_LLH0_FUNC_EN
+ BP_PORT(bp
)*8, 0);
6648 bnx2x_stop_leading(bp
);
6649 #ifdef BNX2X_STOP_ON_ERROR
6650 /* If ramrod completion timed out - break here! */
6652 BNX2X_ERR("Stop leading failed!\n");
6657 if ((bp
->state
!= BNX2X_STATE_CLOSING_WAIT4_UNLOAD
) ||
6658 (bp
->fp
[0].state
!= BNX2X_FP_STATE_CLOSED
)) {
6659 DP(NETIF_MSG_IFDOWN
, "failed to close leading properly! "
6660 "state 0x%x fp[0].state 0x%x\n",
6661 bp
->state
, bp
->fp
[0].state
);
6666 reset_code
= bnx2x_fw_command(bp
, reset_code
);
6668 DP(NETIF_MSG_IFDOWN
, "NO MCP load counts %d, %d, %d\n",
6669 load_count
[0], load_count
[1], load_count
[2]);
6671 load_count
[1 + BP_PORT(bp
)]--;
6672 DP(NETIF_MSG_IFDOWN
, "NO MCP new load counts %d, %d, %d\n",
6673 load_count
[0], load_count
[1], load_count
[2]);
6674 if (load_count
[0] == 0)
6675 reset_code
= FW_MSG_CODE_DRV_UNLOAD_COMMON
;
6676 else if (load_count
[1 + BP_PORT(bp
)] == 0)
6677 reset_code
= FW_MSG_CODE_DRV_UNLOAD_PORT
;
6679 reset_code
= FW_MSG_CODE_DRV_UNLOAD_FUNCTION
;
6682 if ((reset_code
== FW_MSG_CODE_DRV_UNLOAD_COMMON
) ||
6683 (reset_code
== FW_MSG_CODE_DRV_UNLOAD_PORT
))
6684 bnx2x__link_reset(bp
);
6686 /* Reset the chip */
6687 bnx2x_reset_chip(bp
, reset_code
);
6689 /* Report UNLOAD_DONE to MCP */
6691 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
6693 /* Free SKBs, SGEs, TPA pool and driver internals */
6694 bnx2x_free_skbs(bp
);
6695 for_each_queue(bp
, i
)
6696 bnx2x_free_rx_sge_range(bp
, bp
->fp
+ i
,
6697 RX_SGE_CNT
*NUM_RX_SGE_PAGES
);
6700 bp
->state
= BNX2X_STATE_CLOSED
;
6702 netif_carrier_off(bp
->dev
);
6707 static void bnx2x_reset_task(struct work_struct
*work
)
6709 struct bnx2x
*bp
= container_of(work
, struct bnx2x
, reset_task
);
6711 #ifdef BNX2X_STOP_ON_ERROR
6712 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6713 " so reset not done to allow debug dump,\n"
6714 KERN_ERR
" you will need to reboot when done\n");
6720 if (!netif_running(bp
->dev
))
6721 goto reset_task_exit
;
6723 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
6724 bnx2x_nic_load(bp
, LOAD_NORMAL
);
6730 /* end of nic load/unload */
6735 * Init service functions
6738 static void __devinit
bnx2x_undi_unload(struct bnx2x
*bp
)
6742 /* Check if there is any driver already loaded */
6743 val
= REG_RD(bp
, MISC_REG_UNPREPARED
);
6745 /* Check if it is the UNDI driver
6746 * UNDI driver initializes CID offset for normal bell to 0x7
6748 val
= REG_RD(bp
, DORQ_REG_NORM_CID_OFST
);
6750 u32 reset_code
= DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
;
6751 /* save our func and fw_seq */
6752 int func
= BP_FUNC(bp
);
6753 u16 fw_seq
= bp
->fw_seq
;
6755 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6757 /* try unload UNDI on port 0 */
6759 bp
->fw_seq
= (SHMEM_RD(bp
,
6760 func_mb
[bp
->func
].drv_mb_header
) &
6761 DRV_MSG_SEQ_NUMBER_MASK
);
6763 reset_code
= bnx2x_fw_command(bp
, reset_code
);
6764 bnx2x_fw_command(bp
, DRV_MSG_CODE_UNLOAD_DONE
);
6766 /* if UNDI is loaded on the other port */
6767 if (reset_code
!= FW_MSG_CODE_DRV_UNLOAD_COMMON
) {
6770 bp
->fw_seq
= (SHMEM_RD(bp
,
6771 func_mb
[bp
->func
].drv_mb_header
) &
6772 DRV_MSG_SEQ_NUMBER_MASK
);
6774 bnx2x_fw_command(bp
,
6775 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
);
6776 bnx2x_fw_command(bp
,
6777 DRV_MSG_CODE_UNLOAD_DONE
);
6779 /* restore our func and fw_seq */
6781 bp
->fw_seq
= fw_seq
;
6786 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_1_CLEAR
,
6789 GRCBASE_MISC
+ MISC_REGISTERS_RESET_REG_2_CLEAR
,
6795 static void __devinit
bnx2x_get_common_hwinfo(struct bnx2x
*bp
)
6797 u32 val
, val2
, val3
, val4
, id
;
6799 /* Get the chip revision id and number. */
6800 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6801 val
= REG_RD(bp
, MISC_REG_CHIP_NUM
);
6802 id
= ((val
& 0xffff) << 16);
6803 val
= REG_RD(bp
, MISC_REG_CHIP_REV
);
6804 id
|= ((val
& 0xf) << 12);
6805 val
= REG_RD(bp
, MISC_REG_CHIP_METAL
);
6806 id
|= ((val
& 0xff) << 4);
6807 REG_RD(bp
, MISC_REG_BOND_ID
);
6809 bp
->common
.chip_id
= id
;
6810 bp
->link_params
.chip_id
= bp
->common
.chip_id
;
6811 BNX2X_DEV_INFO("chip ID is 0x%x\n", id
);
6813 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_CFG4
);
6814 bp
->common
.flash_size
= (NVRAM_1MB_SIZE
<<
6815 (val
& MCPR_NVM_CFG4_FLASH_SIZE
));
6816 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6817 bp
->common
.flash_size
, bp
->common
.flash_size
);
6819 bp
->common
.shmem_base
= REG_RD(bp
, MISC_REG_SHARED_MEM_ADDR
);
6820 bp
->link_params
.shmem_base
= bp
->common
.shmem_base
;
6821 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp
->common
.shmem_base
);
6823 if (!bp
->common
.shmem_base
||
6824 (bp
->common
.shmem_base
< 0xA0000) ||
6825 (bp
->common
.shmem_base
>= 0xC0000)) {
6826 BNX2X_DEV_INFO("MCP not active\n");
6827 bp
->flags
|= NO_MCP_FLAG
;
6831 val
= SHMEM_RD(bp
, validity_map
[BP_PORT(bp
)]);
6832 if ((val
& (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
6833 != (SHR_MEM_VALIDITY_DEV_INFO
| SHR_MEM_VALIDITY_MB
))
6834 BNX2X_ERR("BAD MCP validity signature\n");
6836 bp
->common
.hw_config
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.config
);
6837 bp
->common
.board
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.board
);
6839 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6840 bp
->common
.hw_config
, bp
->common
.board
);
6842 bp
->link_params
.hw_led_mode
= ((bp
->common
.hw_config
&
6843 SHARED_HW_CFG_LED_MODE_MASK
) >>
6844 SHARED_HW_CFG_LED_MODE_SHIFT
);
6846 val
= SHMEM_RD(bp
, dev_info
.bc_rev
) >> 8;
6847 bp
->common
.bc_ver
= val
;
6848 BNX2X_DEV_INFO("bc_ver %X\n", val
);
6849 if (val
< BNX2X_BC_VER
) {
6850 /* for now only warn
6851 * later we might need to enforce this */
6852 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6853 " please upgrade BC\n", BNX2X_BC_VER
, val
);
6855 BNX2X_DEV_INFO("%sWoL Capable\n",
6856 (bp
->flags
& NO_WOL_FLAG
)? "Not " : "");
6858 val
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
);
6859 val2
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[4]);
6860 val3
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[8]);
6861 val4
= SHMEM_RD(bp
, dev_info
.shared_hw_config
.part_num
[12]);
6863 printk(KERN_INFO PFX
"part number %X-%X-%X-%X\n",
6864 val
, val2
, val3
, val4
);
6867 static void __devinit
bnx2x_link_settings_supported(struct bnx2x
*bp
,
6870 int port
= BP_PORT(bp
);
6873 switch (switch_cfg
) {
6875 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg
);
6878 SERDES_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
6879 switch (ext_phy_type
) {
6880 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT
:
6881 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6884 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
6885 SUPPORTED_10baseT_Full
|
6886 SUPPORTED_100baseT_Half
|
6887 SUPPORTED_100baseT_Full
|
6888 SUPPORTED_1000baseT_Full
|
6889 SUPPORTED_2500baseX_Full
|
6894 SUPPORTED_Asym_Pause
);
6897 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482
:
6898 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6901 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
6902 SUPPORTED_10baseT_Full
|
6903 SUPPORTED_100baseT_Half
|
6904 SUPPORTED_100baseT_Full
|
6905 SUPPORTED_1000baseT_Full
|
6910 SUPPORTED_Asym_Pause
);
6914 BNX2X_ERR("NVRAM config error. "
6915 "BAD SerDes ext_phy_config 0x%x\n",
6916 bp
->link_params
.ext_phy_config
);
6920 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_SERDES0_CTRL_PHY_ADDR
+
6922 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
6925 case SWITCH_CFG_10G
:
6926 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg
);
6929 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
6930 switch (ext_phy_type
) {
6931 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
6932 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6935 bp
->port
.supported
|= (SUPPORTED_10baseT_Half
|
6936 SUPPORTED_10baseT_Full
|
6937 SUPPORTED_100baseT_Half
|
6938 SUPPORTED_100baseT_Full
|
6939 SUPPORTED_1000baseT_Full
|
6940 SUPPORTED_2500baseX_Full
|
6941 SUPPORTED_10000baseT_Full
|
6946 SUPPORTED_Asym_Pause
);
6949 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
6950 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6953 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6956 SUPPORTED_Asym_Pause
);
6959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
6960 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6963 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6964 SUPPORTED_1000baseT_Full
|
6967 SUPPORTED_Asym_Pause
);
6970 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
6971 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6974 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6975 SUPPORTED_1000baseT_Full
|
6979 SUPPORTED_Asym_Pause
);
6982 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
6983 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6986 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
6987 SUPPORTED_2500baseX_Full
|
6988 SUPPORTED_1000baseT_Full
|
6992 SUPPORTED_Asym_Pause
);
6995 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
6996 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6999 bp
->port
.supported
|= (SUPPORTED_10000baseT_Full
|
7003 SUPPORTED_Asym_Pause
);
7006 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
7007 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7008 bp
->link_params
.ext_phy_config
);
7012 BNX2X_ERR("NVRAM config error. "
7013 "BAD XGXS ext_phy_config 0x%x\n",
7014 bp
->link_params
.ext_phy_config
);
7018 bp
->port
.phy_addr
= REG_RD(bp
, NIG_REG_XGXS0_CTRL_PHY_ADDR
+
7020 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp
->port
.phy_addr
);
7025 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7026 bp
->port
.link_config
);
7029 bp
->link_params
.phy_addr
= bp
->port
.phy_addr
;
7031 /* mask what we support according to speed_cap_mask */
7032 if (!(bp
->link_params
.speed_cap_mask
&
7033 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF
))
7034 bp
->port
.supported
&= ~SUPPORTED_10baseT_Half
;
7036 if (!(bp
->link_params
.speed_cap_mask
&
7037 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL
))
7038 bp
->port
.supported
&= ~SUPPORTED_10baseT_Full
;
7040 if (!(bp
->link_params
.speed_cap_mask
&
7041 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF
))
7042 bp
->port
.supported
&= ~SUPPORTED_100baseT_Half
;
7044 if (!(bp
->link_params
.speed_cap_mask
&
7045 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL
))
7046 bp
->port
.supported
&= ~SUPPORTED_100baseT_Full
;
7048 if (!(bp
->link_params
.speed_cap_mask
&
7049 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G
))
7050 bp
->port
.supported
&= ~(SUPPORTED_1000baseT_Half
|
7051 SUPPORTED_1000baseT_Full
);
7053 if (!(bp
->link_params
.speed_cap_mask
&
7054 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G
))
7055 bp
->port
.supported
&= ~SUPPORTED_2500baseX_Full
;
7057 if (!(bp
->link_params
.speed_cap_mask
&
7058 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G
))
7059 bp
->port
.supported
&= ~SUPPORTED_10000baseT_Full
;
7061 BNX2X_DEV_INFO("supported 0x%x\n", bp
->port
.supported
);
7064 static void __devinit
bnx2x_link_settings_requested(struct bnx2x
*bp
)
7066 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
7068 switch (bp
->port
.link_config
& PORT_FEATURE_LINK_SPEED_MASK
) {
7069 case PORT_FEATURE_LINK_SPEED_AUTO
:
7070 if (bp
->port
.supported
& SUPPORTED_Autoneg
) {
7071 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7072 bp
->port
.advertising
= bp
->port
.supported
;
7075 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7077 if ((ext_phy_type
==
7078 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
) ||
7080 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
)) {
7081 /* force 10G, no AN */
7082 bp
->link_params
.req_line_speed
= SPEED_10000
;
7083 bp
->port
.advertising
=
7084 (ADVERTISED_10000baseT_Full
|
7088 BNX2X_ERR("NVRAM config error. "
7089 "Invalid link_config 0x%x"
7090 " Autoneg not supported\n",
7091 bp
->port
.link_config
);
7096 case PORT_FEATURE_LINK_SPEED_10M_FULL
:
7097 if (bp
->port
.supported
& SUPPORTED_10baseT_Full
) {
7098 bp
->link_params
.req_line_speed
= SPEED_10
;
7099 bp
->port
.advertising
= (ADVERTISED_10baseT_Full
|
7102 BNX2X_ERR("NVRAM config error. "
7103 "Invalid link_config 0x%x"
7104 " speed_cap_mask 0x%x\n",
7105 bp
->port
.link_config
,
7106 bp
->link_params
.speed_cap_mask
);
7111 case PORT_FEATURE_LINK_SPEED_10M_HALF
:
7112 if (bp
->port
.supported
& SUPPORTED_10baseT_Half
) {
7113 bp
->link_params
.req_line_speed
= SPEED_10
;
7114 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
7115 bp
->port
.advertising
= (ADVERTISED_10baseT_Half
|
7118 BNX2X_ERR("NVRAM config error. "
7119 "Invalid link_config 0x%x"
7120 " speed_cap_mask 0x%x\n",
7121 bp
->port
.link_config
,
7122 bp
->link_params
.speed_cap_mask
);
7127 case PORT_FEATURE_LINK_SPEED_100M_FULL
:
7128 if (bp
->port
.supported
& SUPPORTED_100baseT_Full
) {
7129 bp
->link_params
.req_line_speed
= SPEED_100
;
7130 bp
->port
.advertising
= (ADVERTISED_100baseT_Full
|
7133 BNX2X_ERR("NVRAM config error. "
7134 "Invalid link_config 0x%x"
7135 " speed_cap_mask 0x%x\n",
7136 bp
->port
.link_config
,
7137 bp
->link_params
.speed_cap_mask
);
7142 case PORT_FEATURE_LINK_SPEED_100M_HALF
:
7143 if (bp
->port
.supported
& SUPPORTED_100baseT_Half
) {
7144 bp
->link_params
.req_line_speed
= SPEED_100
;
7145 bp
->link_params
.req_duplex
= DUPLEX_HALF
;
7146 bp
->port
.advertising
= (ADVERTISED_100baseT_Half
|
7149 BNX2X_ERR("NVRAM config error. "
7150 "Invalid link_config 0x%x"
7151 " speed_cap_mask 0x%x\n",
7152 bp
->port
.link_config
,
7153 bp
->link_params
.speed_cap_mask
);
7158 case PORT_FEATURE_LINK_SPEED_1G
:
7159 if (bp
->port
.supported
& SUPPORTED_1000baseT_Full
) {
7160 bp
->link_params
.req_line_speed
= SPEED_1000
;
7161 bp
->port
.advertising
= (ADVERTISED_1000baseT_Full
|
7164 BNX2X_ERR("NVRAM config error. "
7165 "Invalid link_config 0x%x"
7166 " speed_cap_mask 0x%x\n",
7167 bp
->port
.link_config
,
7168 bp
->link_params
.speed_cap_mask
);
7173 case PORT_FEATURE_LINK_SPEED_2_5G
:
7174 if (bp
->port
.supported
& SUPPORTED_2500baseX_Full
) {
7175 bp
->link_params
.req_line_speed
= SPEED_2500
;
7176 bp
->port
.advertising
= (ADVERTISED_2500baseX_Full
|
7179 BNX2X_ERR("NVRAM config error. "
7180 "Invalid link_config 0x%x"
7181 " speed_cap_mask 0x%x\n",
7182 bp
->port
.link_config
,
7183 bp
->link_params
.speed_cap_mask
);
7188 case PORT_FEATURE_LINK_SPEED_10G_CX4
:
7189 case PORT_FEATURE_LINK_SPEED_10G_KX4
:
7190 case PORT_FEATURE_LINK_SPEED_10G_KR
:
7191 if (bp
->port
.supported
& SUPPORTED_10000baseT_Full
) {
7192 bp
->link_params
.req_line_speed
= SPEED_10000
;
7193 bp
->port
.advertising
= (ADVERTISED_10000baseT_Full
|
7196 BNX2X_ERR("NVRAM config error. "
7197 "Invalid link_config 0x%x"
7198 " speed_cap_mask 0x%x\n",
7199 bp
->port
.link_config
,
7200 bp
->link_params
.speed_cap_mask
);
7206 BNX2X_ERR("NVRAM config error. "
7207 "BAD link speed link_config 0x%x\n",
7208 bp
->port
.link_config
);
7209 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7210 bp
->port
.advertising
= bp
->port
.supported
;
7214 bp
->link_params
.req_flow_ctrl
= (bp
->port
.link_config
&
7215 PORT_FEATURE_FLOW_CONTROL_MASK
);
7216 if ((bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
) &&
7217 !(bp
->port
.supported
& SUPPORTED_Autoneg
))
7218 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_NONE
;
7220 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7221 " advertising 0x%x\n",
7222 bp
->link_params
.req_line_speed
,
7223 bp
->link_params
.req_duplex
,
7224 bp
->link_params
.req_flow_ctrl
, bp
->port
.advertising
);
7227 static void __devinit
bnx2x_get_port_hwinfo(struct bnx2x
*bp
)
7229 int port
= BP_PORT(bp
);
7232 bp
->link_params
.bp
= bp
;
7233 bp
->link_params
.port
= port
;
7235 bp
->link_params
.serdes_config
=
7236 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].serdes_config
);
7237 bp
->link_params
.lane_config
=
7238 SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].lane_config
);
7239 bp
->link_params
.ext_phy_config
=
7241 dev_info
.port_hw_config
[port
].external_phy_config
);
7242 bp
->link_params
.speed_cap_mask
=
7244 dev_info
.port_hw_config
[port
].speed_capability_mask
);
7246 bp
->port
.link_config
=
7247 SHMEM_RD(bp
, dev_info
.port_feature_config
[port
].link_config
);
7249 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7250 KERN_INFO
" ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7251 " link_config 0x%08x\n",
7252 bp
->link_params
.serdes_config
,
7253 bp
->link_params
.lane_config
,
7254 bp
->link_params
.ext_phy_config
,
7255 bp
->link_params
.speed_cap_mask
, bp
->port
.link_config
);
7257 bp
->link_params
.switch_cfg
= (bp
->port
.link_config
&
7258 PORT_FEATURE_CONNECTED_SWITCH_MASK
);
7259 bnx2x_link_settings_supported(bp
, bp
->link_params
.switch_cfg
);
7261 bnx2x_link_settings_requested(bp
);
7263 val2
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_upper
);
7264 val
= SHMEM_RD(bp
, dev_info
.port_hw_config
[port
].mac_lower
);
7265 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
7266 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
7267 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
7268 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
7269 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
7270 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
7271 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
7272 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
7275 static int __devinit
bnx2x_get_hwinfo(struct bnx2x
*bp
)
7277 int func
= BP_FUNC(bp
);
7281 bnx2x_get_common_hwinfo(bp
);
7285 if (CHIP_IS_E1H(bp
)) {
7287 SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].config
);
7290 (SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].e1hov_tag
) &
7291 FUNC_MF_CFG_E1HOV_TAG_MASK
);
7292 if (val
!= FUNC_MF_CFG_E1HOV_TAG_DEFAULT
) {
7296 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7298 func
, bp
->e1hov
, bp
->e1hov
);
7300 BNX2X_DEV_INFO("Single function mode\n");
7302 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7303 " aborting\n", func
);
7309 if (!BP_NOMCP(bp
)) {
7310 bnx2x_get_port_hwinfo(bp
);
7312 bp
->fw_seq
= (SHMEM_RD(bp
, func_mb
[func
].drv_mb_header
) &
7313 DRV_MSG_SEQ_NUMBER_MASK
);
7314 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp
->fw_seq
);
7318 val2
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_upper
);
7319 val
= SHMEM_RD(bp
, mf_cfg
.func_mf_config
[func
].mac_lower
);
7320 if ((val2
!= FUNC_MF_CFG_UPPERMAC_DEFAULT
) &&
7321 (val
!= FUNC_MF_CFG_LOWERMAC_DEFAULT
)) {
7322 bp
->dev
->dev_addr
[0] = (u8
)(val2
>> 8 & 0xff);
7323 bp
->dev
->dev_addr
[1] = (u8
)(val2
& 0xff);
7324 bp
->dev
->dev_addr
[2] = (u8
)(val
>> 24 & 0xff);
7325 bp
->dev
->dev_addr
[3] = (u8
)(val
>> 16 & 0xff);
7326 bp
->dev
->dev_addr
[4] = (u8
)(val
>> 8 & 0xff);
7327 bp
->dev
->dev_addr
[5] = (u8
)(val
& 0xff);
7328 memcpy(bp
->link_params
.mac_addr
, bp
->dev
->dev_addr
,
7330 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
,
7338 /* only supposed to happen on emulation/FPGA */
7339 BNX2X_ERR("warning rendom MAC workaround active\n");
7340 random_ether_addr(bp
->dev
->dev_addr
);
7341 memcpy(bp
->dev
->perm_addr
, bp
->dev
->dev_addr
, ETH_ALEN
);
7347 static int __devinit
bnx2x_init_bp(struct bnx2x
*bp
)
7349 int func
= BP_FUNC(bp
);
7352 mutex_init(&bp
->port
.phy_mutex
);
7354 INIT_WORK(&bp
->sp_task
, bnx2x_sp_task
);
7355 INIT_WORK(&bp
->reset_task
, bnx2x_reset_task
);
7357 rc
= bnx2x_get_hwinfo(bp
);
7359 /* need to reset chip if undi was active */
7361 bnx2x_undi_unload(bp
);
7363 if (CHIP_REV_IS_FPGA(bp
))
7364 printk(KERN_ERR PFX
"FPGA detected\n");
7366 if (BP_NOMCP(bp
) && (func
== 0))
7368 "MCP disabled, must load devices in order!\n");
7372 bp
->flags
&= ~TPA_ENABLE_FLAG
;
7373 bp
->dev
->features
&= ~NETIF_F_LRO
;
7375 bp
->flags
|= TPA_ENABLE_FLAG
;
7376 bp
->dev
->features
|= NETIF_F_LRO
;
7380 bp
->tx_ring_size
= MAX_TX_AVAIL
;
7381 bp
->rx_ring_size
= MAX_RX_AVAIL
;
7389 bp
->stats_ticks
= 1000000 & 0xffff00;
7391 bp
->timer_interval
= (CHIP_REV_IS_SLOW(bp
) ? 5*HZ
: HZ
);
7392 bp
->current_interval
= (poll
? poll
: bp
->timer_interval
);
7394 init_timer(&bp
->timer
);
7395 bp
->timer
.expires
= jiffies
+ bp
->current_interval
;
7396 bp
->timer
.data
= (unsigned long) bp
;
7397 bp
->timer
.function
= bnx2x_timer
;
7403 * ethtool service functions
7406 /* All ethtool functions called with rtnl_lock */
7408 static int bnx2x_get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7410 struct bnx2x
*bp
= netdev_priv(dev
);
7412 cmd
->supported
= bp
->port
.supported
;
7413 cmd
->advertising
= bp
->port
.advertising
;
7415 if (netif_carrier_ok(dev
)) {
7416 cmd
->speed
= bp
->link_vars
.line_speed
;
7417 cmd
->duplex
= bp
->link_vars
.duplex
;
7419 cmd
->speed
= bp
->link_params
.req_line_speed
;
7420 cmd
->duplex
= bp
->link_params
.req_duplex
;
7425 vn_max_rate
= ((bp
->mf_config
& FUNC_MF_CFG_MAX_BW_MASK
) >>
7426 FUNC_MF_CFG_MAX_BW_SHIFT
) * 100;
7427 if (vn_max_rate
< cmd
->speed
)
7428 cmd
->speed
= vn_max_rate
;
7431 if (bp
->link_params
.switch_cfg
== SWITCH_CFG_10G
) {
7433 XGXS_EXT_PHY_TYPE(bp
->link_params
.ext_phy_config
);
7435 switch (ext_phy_type
) {
7436 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT
:
7437 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705
:
7438 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706
:
7439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072
:
7440 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073
:
7441 cmd
->port
= PORT_FIBRE
;
7444 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101
:
7445 cmd
->port
= PORT_TP
;
7448 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE
:
7449 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7450 bp
->link_params
.ext_phy_config
);
7454 DP(NETIF_MSG_LINK
, "BAD XGXS ext_phy_config 0x%x\n",
7455 bp
->link_params
.ext_phy_config
);
7459 cmd
->port
= PORT_TP
;
7461 cmd
->phy_address
= bp
->port
.phy_addr
;
7462 cmd
->transceiver
= XCVR_INTERNAL
;
7464 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
7465 cmd
->autoneg
= AUTONEG_ENABLE
;
7467 cmd
->autoneg
= AUTONEG_DISABLE
;
7472 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
7473 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
7474 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
7475 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
7476 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
7477 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
7478 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
7483 static int bnx2x_set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
7485 struct bnx2x
*bp
= netdev_priv(dev
);
7491 DP(NETIF_MSG_LINK
, "ethtool_cmd: cmd %d\n"
7492 DP_LEVEL
" supported 0x%x advertising 0x%x speed %d\n"
7493 DP_LEVEL
" duplex %d port %d phy_address %d transceiver %d\n"
7494 DP_LEVEL
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
7495 cmd
->cmd
, cmd
->supported
, cmd
->advertising
, cmd
->speed
,
7496 cmd
->duplex
, cmd
->port
, cmd
->phy_address
, cmd
->transceiver
,
7497 cmd
->autoneg
, cmd
->maxtxpkt
, cmd
->maxrxpkt
);
7499 if (cmd
->autoneg
== AUTONEG_ENABLE
) {
7500 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
7501 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
7505 /* advertise the requested speed and duplex if supported */
7506 cmd
->advertising
&= bp
->port
.supported
;
7508 bp
->link_params
.req_line_speed
= SPEED_AUTO_NEG
;
7509 bp
->link_params
.req_duplex
= DUPLEX_FULL
;
7510 bp
->port
.advertising
|= (ADVERTISED_Autoneg
|
7513 } else { /* forced speed */
7514 /* advertise the requested speed and duplex if supported */
7515 switch (cmd
->speed
) {
7517 if (cmd
->duplex
== DUPLEX_FULL
) {
7518 if (!(bp
->port
.supported
&
7519 SUPPORTED_10baseT_Full
)) {
7521 "10M full not supported\n");
7525 advertising
= (ADVERTISED_10baseT_Full
|
7528 if (!(bp
->port
.supported
&
7529 SUPPORTED_10baseT_Half
)) {
7531 "10M half not supported\n");
7535 advertising
= (ADVERTISED_10baseT_Half
|
7541 if (cmd
->duplex
== DUPLEX_FULL
) {
7542 if (!(bp
->port
.supported
&
7543 SUPPORTED_100baseT_Full
)) {
7545 "100M full not supported\n");
7549 advertising
= (ADVERTISED_100baseT_Full
|
7552 if (!(bp
->port
.supported
&
7553 SUPPORTED_100baseT_Half
)) {
7555 "100M half not supported\n");
7559 advertising
= (ADVERTISED_100baseT_Half
|
7565 if (cmd
->duplex
!= DUPLEX_FULL
) {
7566 DP(NETIF_MSG_LINK
, "1G half not supported\n");
7570 if (!(bp
->port
.supported
& SUPPORTED_1000baseT_Full
)) {
7571 DP(NETIF_MSG_LINK
, "1G full not supported\n");
7575 advertising
= (ADVERTISED_1000baseT_Full
|
7580 if (cmd
->duplex
!= DUPLEX_FULL
) {
7582 "2.5G half not supported\n");
7586 if (!(bp
->port
.supported
& SUPPORTED_2500baseX_Full
)) {
7588 "2.5G full not supported\n");
7592 advertising
= (ADVERTISED_2500baseX_Full
|
7597 if (cmd
->duplex
!= DUPLEX_FULL
) {
7598 DP(NETIF_MSG_LINK
, "10G half not supported\n");
7602 if (!(bp
->port
.supported
& SUPPORTED_10000baseT_Full
)) {
7603 DP(NETIF_MSG_LINK
, "10G full not supported\n");
7607 advertising
= (ADVERTISED_10000baseT_Full
|
7612 DP(NETIF_MSG_LINK
, "Unsupported speed\n");
7616 bp
->link_params
.req_line_speed
= cmd
->speed
;
7617 bp
->link_params
.req_duplex
= cmd
->duplex
;
7618 bp
->port
.advertising
= advertising
;
7621 DP(NETIF_MSG_LINK
, "req_line_speed %d\n"
7622 DP_LEVEL
" req_duplex %d advertising 0x%x\n",
7623 bp
->link_params
.req_line_speed
, bp
->link_params
.req_duplex
,
7624 bp
->port
.advertising
);
7626 if (netif_running(dev
)) {
7627 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7634 #define PHY_FW_VER_LEN 10
7636 static void bnx2x_get_drvinfo(struct net_device
*dev
,
7637 struct ethtool_drvinfo
*info
)
7639 struct bnx2x
*bp
= netdev_priv(dev
);
7640 char phy_fw_ver
[PHY_FW_VER_LEN
];
7642 strcpy(info
->driver
, DRV_MODULE_NAME
);
7643 strcpy(info
->version
, DRV_MODULE_VERSION
);
7645 phy_fw_ver
[0] = '\0';
7647 bnx2x_phy_hw_lock(bp
);
7648 bnx2x_get_ext_phy_fw_version(&bp
->link_params
,
7649 (bp
->state
!= BNX2X_STATE_CLOSED
),
7650 phy_fw_ver
, PHY_FW_VER_LEN
);
7651 bnx2x_phy_hw_unlock(bp
);
7654 snprintf(info
->fw_version
, 32, "%d.%d.%d:%d BC:%x%s%s",
7655 BCM_5710_FW_MAJOR_VERSION
, BCM_5710_FW_MINOR_VERSION
,
7656 BCM_5710_FW_REVISION_VERSION
,
7657 BCM_5710_FW_COMPILE_FLAGS
, bp
->common
.bc_ver
,
7658 ((phy_fw_ver
[0] != '\0')? " PHY:":""), phy_fw_ver
);
7659 strcpy(info
->bus_info
, pci_name(bp
->pdev
));
7660 info
->n_stats
= BNX2X_NUM_STATS
;
7661 info
->testinfo_len
= BNX2X_NUM_TESTS
;
7662 info
->eedump_len
= bp
->common
.flash_size
;
7663 info
->regdump_len
= 0;
7666 static void bnx2x_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7668 struct bnx2x
*bp
= netdev_priv(dev
);
7670 if (bp
->flags
& NO_WOL_FLAG
) {
7674 wol
->supported
= WAKE_MAGIC
;
7676 wol
->wolopts
= WAKE_MAGIC
;
7680 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
7683 static int bnx2x_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
7685 struct bnx2x
*bp
= netdev_priv(dev
);
7687 if (wol
->wolopts
& ~WAKE_MAGIC
)
7690 if (wol
->wolopts
& WAKE_MAGIC
) {
7691 if (bp
->flags
& NO_WOL_FLAG
)
7701 static u32
bnx2x_get_msglevel(struct net_device
*dev
)
7703 struct bnx2x
*bp
= netdev_priv(dev
);
7705 return bp
->msglevel
;
7708 static void bnx2x_set_msglevel(struct net_device
*dev
, u32 level
)
7710 struct bnx2x
*bp
= netdev_priv(dev
);
7712 if (capable(CAP_NET_ADMIN
))
7713 bp
->msglevel
= level
;
7716 static int bnx2x_nway_reset(struct net_device
*dev
)
7718 struct bnx2x
*bp
= netdev_priv(dev
);
7723 if (netif_running(dev
)) {
7724 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
7731 static int bnx2x_get_eeprom_len(struct net_device
*dev
)
7733 struct bnx2x
*bp
= netdev_priv(dev
);
7735 return bp
->common
.flash_size
;
7738 static int bnx2x_acquire_nvram_lock(struct bnx2x
*bp
)
7740 int port
= BP_PORT(bp
);
7744 /* adjust timeout for emulation/FPGA */
7745 count
= NVRAM_TIMEOUT_COUNT
;
7746 if (CHIP_REV_IS_SLOW(bp
))
7749 /* request access to nvram interface */
7750 REG_WR(bp
, MCP_REG_MCPR_NVM_SW_ARB
,
7751 (MCPR_NVM_SW_ARB_ARB_REQ_SET1
<< port
));
7753 for (i
= 0; i
< count
*10; i
++) {
7754 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_SW_ARB
);
7755 if (val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
))
7761 if (!(val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
))) {
7762 DP(BNX2X_MSG_NVM
, "cannot get access to nvram interface\n");
7769 static int bnx2x_release_nvram_lock(struct bnx2x
*bp
)
7771 int port
= BP_PORT(bp
);
7775 /* adjust timeout for emulation/FPGA */
7776 count
= NVRAM_TIMEOUT_COUNT
;
7777 if (CHIP_REV_IS_SLOW(bp
))
7780 /* relinquish nvram interface */
7781 REG_WR(bp
, MCP_REG_MCPR_NVM_SW_ARB
,
7782 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1
<< port
));
7784 for (i
= 0; i
< count
*10; i
++) {
7785 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_SW_ARB
);
7786 if (!(val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
)))
7792 if (val
& (MCPR_NVM_SW_ARB_ARB_ARB1
<< port
)) {
7793 DP(BNX2X_MSG_NVM
, "cannot free access to nvram interface\n");
7800 static void bnx2x_enable_nvram_access(struct bnx2x
*bp
)
7804 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
);
7806 /* enable both bits, even on read */
7807 REG_WR(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
,
7808 (val
| MCPR_NVM_ACCESS_ENABLE_EN
|
7809 MCPR_NVM_ACCESS_ENABLE_WR_EN
));
7812 static void bnx2x_disable_nvram_access(struct bnx2x
*bp
)
7816 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
);
7818 /* disable both bits, even after read */
7819 REG_WR(bp
, MCP_REG_MCPR_NVM_ACCESS_ENABLE
,
7820 (val
& ~(MCPR_NVM_ACCESS_ENABLE_EN
|
7821 MCPR_NVM_ACCESS_ENABLE_WR_EN
)));
7824 static int bnx2x_nvram_read_dword(struct bnx2x
*bp
, u32 offset
, u32
*ret_val
,
7830 /* build the command word */
7831 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
;
7833 /* need to clear DONE bit separately */
7834 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
7836 /* address of the NVRAM to read from */
7837 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
7838 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
7840 /* issue a read command */
7841 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
7843 /* adjust timeout for emulation/FPGA */
7844 count
= NVRAM_TIMEOUT_COUNT
;
7845 if (CHIP_REV_IS_SLOW(bp
))
7848 /* wait for completion */
7851 for (i
= 0; i
< count
; i
++) {
7853 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
7855 if (val
& MCPR_NVM_COMMAND_DONE
) {
7856 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_READ
);
7857 /* we read nvram data in cpu order
7858 * but ethtool sees it as an array of bytes
7859 * converting to big-endian will do the work */
7860 val
= cpu_to_be32(val
);
7870 static int bnx2x_nvram_read(struct bnx2x
*bp
, u32 offset
, u8
*ret_buf
,
7877 if ((offset
& 0x03) || (buf_size
& 0x03) || (buf_size
== 0)) {
7879 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7884 if (offset
+ buf_size
> bp
->common
.flash_size
) {
7885 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
7886 " buf_size (0x%x) > flash_size (0x%x)\n",
7887 offset
, buf_size
, bp
->common
.flash_size
);
7891 /* request access to nvram interface */
7892 rc
= bnx2x_acquire_nvram_lock(bp
);
7896 /* enable access to nvram interface */
7897 bnx2x_enable_nvram_access(bp
);
7899 /* read the first word(s) */
7900 cmd_flags
= MCPR_NVM_COMMAND_FIRST
;
7901 while ((buf_size
> sizeof(u32
)) && (rc
== 0)) {
7902 rc
= bnx2x_nvram_read_dword(bp
, offset
, &val
, cmd_flags
);
7903 memcpy(ret_buf
, &val
, 4);
7905 /* advance to the next dword */
7906 offset
+= sizeof(u32
);
7907 ret_buf
+= sizeof(u32
);
7908 buf_size
-= sizeof(u32
);
7913 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
7914 rc
= bnx2x_nvram_read_dword(bp
, offset
, &val
, cmd_flags
);
7915 memcpy(ret_buf
, &val
, 4);
7918 /* disable access to nvram interface */
7919 bnx2x_disable_nvram_access(bp
);
7920 bnx2x_release_nvram_lock(bp
);
7925 static int bnx2x_get_eeprom(struct net_device
*dev
,
7926 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
7928 struct bnx2x
*bp
= netdev_priv(dev
);
7931 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
7932 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7933 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
7934 eeprom
->len
, eeprom
->len
);
7936 /* parameters already validated in ethtool_get_eeprom */
7938 rc
= bnx2x_nvram_read(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
7943 static int bnx2x_nvram_write_dword(struct bnx2x
*bp
, u32 offset
, u32 val
,
7948 /* build the command word */
7949 cmd_flags
|= MCPR_NVM_COMMAND_DOIT
| MCPR_NVM_COMMAND_WR
;
7951 /* need to clear DONE bit separately */
7952 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, MCPR_NVM_COMMAND_DONE
);
7954 /* write the data */
7955 REG_WR(bp
, MCP_REG_MCPR_NVM_WRITE
, val
);
7957 /* address of the NVRAM to write to */
7958 REG_WR(bp
, MCP_REG_MCPR_NVM_ADDR
,
7959 (offset
& MCPR_NVM_ADDR_NVM_ADDR_VALUE
));
7961 /* issue the write command */
7962 REG_WR(bp
, MCP_REG_MCPR_NVM_COMMAND
, cmd_flags
);
7964 /* adjust timeout for emulation/FPGA */
7965 count
= NVRAM_TIMEOUT_COUNT
;
7966 if (CHIP_REV_IS_SLOW(bp
))
7969 /* wait for completion */
7971 for (i
= 0; i
< count
; i
++) {
7973 val
= REG_RD(bp
, MCP_REG_MCPR_NVM_COMMAND
);
7974 if (val
& MCPR_NVM_COMMAND_DONE
) {
7983 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
7985 static int bnx2x_nvram_write1(struct bnx2x
*bp
, u32 offset
, u8
*data_buf
,
7993 if (offset
+ buf_size
> bp
->common
.flash_size
) {
7994 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
7995 " buf_size (0x%x) > flash_size (0x%x)\n",
7996 offset
, buf_size
, bp
->common
.flash_size
);
8000 /* request access to nvram interface */
8001 rc
= bnx2x_acquire_nvram_lock(bp
);
8005 /* enable access to nvram interface */
8006 bnx2x_enable_nvram_access(bp
);
8008 cmd_flags
= (MCPR_NVM_COMMAND_FIRST
| MCPR_NVM_COMMAND_LAST
);
8009 align_offset
= (offset
& ~0x03);
8010 rc
= bnx2x_nvram_read_dword(bp
, align_offset
, &val
, cmd_flags
);
8013 val
&= ~(0xff << BYTE_OFFSET(offset
));
8014 val
|= (*data_buf
<< BYTE_OFFSET(offset
));
8016 /* nvram data is returned as an array of bytes
8017 * convert it back to cpu order */
8018 val
= be32_to_cpu(val
);
8020 rc
= bnx2x_nvram_write_dword(bp
, align_offset
, val
,
8024 /* disable access to nvram interface */
8025 bnx2x_disable_nvram_access(bp
);
8026 bnx2x_release_nvram_lock(bp
);
8031 static int bnx2x_nvram_write(struct bnx2x
*bp
, u32 offset
, u8
*data_buf
,
8039 if (buf_size
== 1) /* ethtool */
8040 return bnx2x_nvram_write1(bp
, offset
, data_buf
, buf_size
);
8042 if ((offset
& 0x03) || (buf_size
& 0x03) || (buf_size
== 0)) {
8044 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8049 if (offset
+ buf_size
> bp
->common
.flash_size
) {
8050 DP(BNX2X_MSG_NVM
, "Invalid parameter: offset (0x%x) +"
8051 " buf_size (0x%x) > flash_size (0x%x)\n",
8052 offset
, buf_size
, bp
->common
.flash_size
);
8056 /* request access to nvram interface */
8057 rc
= bnx2x_acquire_nvram_lock(bp
);
8061 /* enable access to nvram interface */
8062 bnx2x_enable_nvram_access(bp
);
8065 cmd_flags
= MCPR_NVM_COMMAND_FIRST
;
8066 while ((written_so_far
< buf_size
) && (rc
== 0)) {
8067 if (written_so_far
== (buf_size
- sizeof(u32
)))
8068 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
8069 else if (((offset
+ 4) % NVRAM_PAGE_SIZE
) == 0)
8070 cmd_flags
|= MCPR_NVM_COMMAND_LAST
;
8071 else if ((offset
% NVRAM_PAGE_SIZE
) == 0)
8072 cmd_flags
|= MCPR_NVM_COMMAND_FIRST
;
8074 memcpy(&val
, data_buf
, 4);
8076 rc
= bnx2x_nvram_write_dword(bp
, offset
, val
, cmd_flags
);
8078 /* advance to the next dword */
8079 offset
+= sizeof(u32
);
8080 data_buf
+= sizeof(u32
);
8081 written_so_far
+= sizeof(u32
);
8085 /* disable access to nvram interface */
8086 bnx2x_disable_nvram_access(bp
);
8087 bnx2x_release_nvram_lock(bp
);
8092 static int bnx2x_set_eeprom(struct net_device
*dev
,
8093 struct ethtool_eeprom
*eeprom
, u8
*eebuf
)
8095 struct bnx2x
*bp
= netdev_priv(dev
);
8098 DP(BNX2X_MSG_NVM
, "ethtool_eeprom: cmd %d\n"
8099 DP_LEVEL
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8100 eeprom
->cmd
, eeprom
->magic
, eeprom
->offset
, eeprom
->offset
,
8101 eeprom
->len
, eeprom
->len
);
8103 /* parameters already validated in ethtool_set_eeprom */
8105 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8106 if (eeprom
->magic
== 0x00504859)
8109 bnx2x_phy_hw_lock(bp
);
8110 rc
= bnx2x_flash_download(bp
, BP_PORT(bp
),
8111 bp
->link_params
.ext_phy_config
,
8112 (bp
->state
!= BNX2X_STATE_CLOSED
),
8113 eebuf
, eeprom
->len
);
8114 if ((bp
->state
== BNX2X_STATE_OPEN
) ||
8115 (bp
->state
== BNX2X_STATE_DISABLED
)) {
8116 rc
|= bnx2x_link_reset(&bp
->link_params
,
8118 rc
|= bnx2x_phy_init(&bp
->link_params
,
8121 bnx2x_phy_hw_unlock(bp
);
8123 } else /* Only the PMF can access the PHY */
8126 rc
= bnx2x_nvram_write(bp
, eeprom
->offset
, eebuf
, eeprom
->len
);
8131 static int bnx2x_get_coalesce(struct net_device
*dev
,
8132 struct ethtool_coalesce
*coal
)
8134 struct bnx2x
*bp
= netdev_priv(dev
);
8136 memset(coal
, 0, sizeof(struct ethtool_coalesce
));
8138 coal
->rx_coalesce_usecs
= bp
->rx_ticks
;
8139 coal
->tx_coalesce_usecs
= bp
->tx_ticks
;
8140 coal
->stats_block_coalesce_usecs
= bp
->stats_ticks
;
8145 static int bnx2x_set_coalesce(struct net_device
*dev
,
8146 struct ethtool_coalesce
*coal
)
8148 struct bnx2x
*bp
= netdev_priv(dev
);
8150 bp
->rx_ticks
= (u16
) coal
->rx_coalesce_usecs
;
8151 if (bp
->rx_ticks
> 3000)
8152 bp
->rx_ticks
= 3000;
8154 bp
->tx_ticks
= (u16
) coal
->tx_coalesce_usecs
;
8155 if (bp
->tx_ticks
> 0x3000)
8156 bp
->tx_ticks
= 0x3000;
8158 bp
->stats_ticks
= coal
->stats_block_coalesce_usecs
;
8159 if (bp
->stats_ticks
> 0xffff00)
8160 bp
->stats_ticks
= 0xffff00;
8161 bp
->stats_ticks
&= 0xffff00;
8163 if (netif_running(dev
))
8164 bnx2x_update_coalesce(bp
);
8169 static int bnx2x_set_flags(struct net_device
*dev
, u32 data
)
8171 struct bnx2x
*bp
= netdev_priv(dev
);
8175 if (data
& ETH_FLAG_LRO
) {
8176 if (!(dev
->features
& NETIF_F_LRO
)) {
8177 dev
->features
|= NETIF_F_LRO
;
8178 bp
->flags
|= TPA_ENABLE_FLAG
;
8182 } else if (dev
->features
& NETIF_F_LRO
) {
8183 dev
->features
&= ~NETIF_F_LRO
;
8184 bp
->flags
&= ~TPA_ENABLE_FLAG
;
8188 if (changed
&& netif_running(dev
)) {
8189 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8190 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
8196 static void bnx2x_get_ringparam(struct net_device
*dev
,
8197 struct ethtool_ringparam
*ering
)
8199 struct bnx2x
*bp
= netdev_priv(dev
);
8201 ering
->rx_max_pending
= MAX_RX_AVAIL
;
8202 ering
->rx_mini_max_pending
= 0;
8203 ering
->rx_jumbo_max_pending
= 0;
8205 ering
->rx_pending
= bp
->rx_ring_size
;
8206 ering
->rx_mini_pending
= 0;
8207 ering
->rx_jumbo_pending
= 0;
8209 ering
->tx_max_pending
= MAX_TX_AVAIL
;
8210 ering
->tx_pending
= bp
->tx_ring_size
;
8213 static int bnx2x_set_ringparam(struct net_device
*dev
,
8214 struct ethtool_ringparam
*ering
)
8216 struct bnx2x
*bp
= netdev_priv(dev
);
8219 if ((ering
->rx_pending
> MAX_RX_AVAIL
) ||
8220 (ering
->tx_pending
> MAX_TX_AVAIL
) ||
8221 (ering
->tx_pending
<= MAX_SKB_FRAGS
+ 4))
8224 bp
->rx_ring_size
= ering
->rx_pending
;
8225 bp
->tx_ring_size
= ering
->tx_pending
;
8227 if (netif_running(dev
)) {
8228 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8229 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
8235 static void bnx2x_get_pauseparam(struct net_device
*dev
,
8236 struct ethtool_pauseparam
*epause
)
8238 struct bnx2x
*bp
= netdev_priv(dev
);
8240 epause
->autoneg
= (bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
) &&
8241 (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
);
8243 epause
->rx_pause
= ((bp
->link_vars
.flow_ctrl
& FLOW_CTRL_RX
) ==
8245 epause
->tx_pause
= ((bp
->link_vars
.flow_ctrl
& FLOW_CTRL_TX
) ==
8248 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
8249 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
8250 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
8253 static int bnx2x_set_pauseparam(struct net_device
*dev
,
8254 struct ethtool_pauseparam
*epause
)
8256 struct bnx2x
*bp
= netdev_priv(dev
);
8261 DP(NETIF_MSG_LINK
, "ethtool_pauseparam: cmd %d\n"
8262 DP_LEVEL
" autoneg %d rx_pause %d tx_pause %d\n",
8263 epause
->cmd
, epause
->autoneg
, epause
->rx_pause
, epause
->tx_pause
);
8265 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_AUTO
;
8267 if (epause
->rx_pause
)
8268 bp
->link_params
.req_flow_ctrl
|= FLOW_CTRL_RX
;
8270 if (epause
->tx_pause
)
8271 bp
->link_params
.req_flow_ctrl
|= FLOW_CTRL_TX
;
8273 if (bp
->link_params
.req_flow_ctrl
== FLOW_CTRL_AUTO
)
8274 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_NONE
;
8276 if (epause
->autoneg
) {
8277 if (!(bp
->port
.supported
& SUPPORTED_Autoneg
)) {
8278 DP(NETIF_MSG_LINK
, "Autoneg not supported\n");
8282 if (bp
->link_params
.req_line_speed
== SPEED_AUTO_NEG
)
8283 bp
->link_params
.req_flow_ctrl
= FLOW_CTRL_AUTO
;
8287 "req_flow_ctrl 0x%x\n", bp
->link_params
.req_flow_ctrl
);
8289 if (netif_running(dev
)) {
8290 bnx2x_stats_handle(bp
, STATS_EVENT_STOP
);
8297 static u32
bnx2x_get_rx_csum(struct net_device
*dev
)
8299 struct bnx2x
*bp
= netdev_priv(dev
);
8304 static int bnx2x_set_rx_csum(struct net_device
*dev
, u32 data
)
8306 struct bnx2x
*bp
= netdev_priv(dev
);
8312 static int bnx2x_set_tso(struct net_device
*dev
, u32 data
)
8315 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8316 dev
->features
|= NETIF_F_TSO6
;
8318 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO_ECN
);
8319 dev
->features
&= ~NETIF_F_TSO6
;
8325 static const struct {
8326 char string
[ETH_GSTRING_LEN
];
8327 } bnx2x_tests_str_arr
[BNX2X_NUM_TESTS
] = {
8328 { "register_test (offline)" },
8329 { "memory_test (offline)" },
8330 { "loopback_test (offline)" },
8331 { "nvram_test (online)" },
8332 { "interrupt_test (online)" },
8333 { "link_test (online)" },
8334 { "idle check (online)" },
8335 { "MC errors (online)" }
8338 static int bnx2x_self_test_count(struct net_device
*dev
)
8340 return BNX2X_NUM_TESTS
;
8343 static int bnx2x_test_registers(struct bnx2x
*bp
)
8345 int idx
, i
, rc
= -ENODEV
;
8347 static const struct {
8352 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0
, 4, 0x000003ff },
8353 { DORQ_REG_DB_ADDR0
, 4, 0xffffffff },
8354 { HC_REG_AGG_INT_0
, 4, 0x000003ff },
8355 { PBF_REG_MAC_IF0_ENABLE
, 4, 0x00000001 },
8356 { PBF_REG_P0_INIT_CRD
, 4, 0x000007ff },
8357 { PRS_REG_CID_PORT_0
, 4, 0x00ffffff },
8358 { PXP2_REG_PSWRQ_CDU0_L2P
, 4, 0x000fffff },
8359 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
8360 { PXP2_REG_PSWRQ_TM0_L2P
, 4, 0x000fffff },
8361 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR
, 8, 0x0003ffff },
8362 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P
, 4, 0x000fffff },
8363 { QM_REG_CONNNUM_0
, 4, 0x000fffff },
8364 { TM_REG_LIN0_MAX_ACTIVE_CID
, 4, 0x0003ffff },
8365 { SRC_REG_KEYRSS0_0
, 40, 0xffffffff },
8366 { SRC_REG_KEYRSS0_7
, 40, 0xffffffff },
8367 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00
, 4, 0x00000001 },
8368 { XCM_REG_WU_DA_CNT_CMD00
, 4, 0x00000003 },
8369 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0
, 4, 0x000000ff },
8370 { NIG_REG_EGRESS_MNG0_FIFO
, 20, 0xffffffff },
8371 { NIG_REG_LLH0_T_BIT
, 4, 0x00000001 },
8372 /* 20 */ { NIG_REG_EMAC0_IN_EN
, 4, 0x00000001 },
8373 { NIG_REG_BMAC0_IN_EN
, 4, 0x00000001 },
8374 { NIG_REG_XCM0_OUT_EN
, 4, 0x00000001 },
8375 { NIG_REG_BRB0_OUT_EN
, 4, 0x00000001 },
8376 { NIG_REG_LLH0_XCM_MASK
, 4, 0x00000007 },
8377 { NIG_REG_LLH0_ACPI_PAT_6_LEN
, 68, 0x000000ff },
8378 { NIG_REG_LLH0_ACPI_PAT_0_CRC
, 68, 0xffffffff },
8379 { NIG_REG_LLH0_DEST_MAC_0_0
, 160, 0xffffffff },
8380 { NIG_REG_LLH0_DEST_IP_0_1
, 160, 0xffffffff },
8381 { NIG_REG_LLH0_IPV4_IPV6_0
, 160, 0x00000001 },
8382 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0
, 160, 0x0000ffff },
8383 { NIG_REG_LLH0_DEST_TCP_0
, 160, 0x0000ffff },
8384 { NIG_REG_LLH0_VLAN_ID_0
, 160, 0x00000fff },
8385 { NIG_REG_XGXS_SERDES0_MODE_SEL
, 4, 0x00000001 },
8386 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
, 4, 0x00000001 },
8387 { NIG_REG_STATUS_INTERRUPT_PORT0
, 4, 0x07ffffff },
8388 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST
, 24, 0x00000001 },
8389 { NIG_REG_SERDES0_CTRL_PHY_ADDR
, 16, 0x0000001f },
8391 { 0xffffffff, 0, 0x00000000 }
8394 if (!netif_running(bp
->dev
))
8397 /* Repeat the test twice:
8398 First by writing 0x00000000, second by writing 0xffffffff */
8399 for (idx
= 0; idx
< 2; idx
++) {
8406 wr_val
= 0xffffffff;
8410 for (i
= 0; reg_tbl
[i
].offset0
!= 0xffffffff; i
++) {
8411 u32 offset
, mask
, save_val
, val
;
8412 int port
= BP_PORT(bp
);
8414 offset
= reg_tbl
[i
].offset0
+ port
*reg_tbl
[i
].offset1
;
8415 mask
= reg_tbl
[i
].mask
;
8417 save_val
= REG_RD(bp
, offset
);
8419 REG_WR(bp
, offset
, wr_val
);
8420 val
= REG_RD(bp
, offset
);
8422 /* Restore the original register's value */
8423 REG_WR(bp
, offset
, save_val
);
8425 /* verify that value is as expected value */
8426 if ((val
& mask
) != (wr_val
& mask
))
8437 static int bnx2x_test_memory(struct bnx2x
*bp
)
8439 int i
, j
, rc
= -ENODEV
;
8441 static const struct {
8445 { CCM_REG_XX_DESCR_TABLE
, CCM_REG_XX_DESCR_TABLE_SIZE
},
8446 { CFC_REG_ACTIVITY_COUNTER
, CFC_REG_ACTIVITY_COUNTER_SIZE
},
8447 { CFC_REG_LINK_LIST
, CFC_REG_LINK_LIST_SIZE
},
8448 { DMAE_REG_CMD_MEM
, DMAE_REG_CMD_MEM_SIZE
},
8449 { TCM_REG_XX_DESCR_TABLE
, TCM_REG_XX_DESCR_TABLE_SIZE
},
8450 { UCM_REG_XX_DESCR_TABLE
, UCM_REG_XX_DESCR_TABLE_SIZE
},
8451 { XCM_REG_XX_DESCR_TABLE
, XCM_REG_XX_DESCR_TABLE_SIZE
},
8455 static const struct {
8460 { "CCM_REG_CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS
, 0 },
8461 { "CFC_REG_CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS
, 0 },
8462 { "DMAE_REG_DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS
, 0 },
8463 { "TCM_REG_TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS
, 0 },
8464 { "UCM_REG_UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS
, 0 },
8465 { "XCM_REG_XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS
, 0x1 },
8467 { NULL
, 0xffffffff, 0 }
8470 if (!netif_running(bp
->dev
))
8473 /* Go through all the memories */
8474 for (i
= 0; mem_tbl
[i
].offset
!= 0xffffffff; i
++)
8475 for (j
= 0; j
< mem_tbl
[i
].size
; j
++)
8476 REG_RD(bp
, mem_tbl
[i
].offset
+ j
*4);
8478 /* Check the parity status */
8479 for (i
= 0; prty_tbl
[i
].offset
!= 0xffffffff; i
++) {
8480 val
= REG_RD(bp
, prty_tbl
[i
].offset
);
8481 if (val
& ~(prty_tbl
[i
].mask
)) {
8483 "%s is 0x%x\n", prty_tbl
[i
].name
, val
);
8494 static void bnx2x_netif_start(struct bnx2x
*bp
)
8498 if (atomic_dec_and_test(&bp
->intr_sem
)) {
8499 if (netif_running(bp
->dev
)) {
8500 bnx2x_int_enable(bp
);
8501 for_each_queue(bp
, i
)
8502 napi_enable(&bnx2x_fp(bp
, i
, napi
));
8503 if (bp
->state
== BNX2X_STATE_OPEN
)
8504 netif_wake_queue(bp
->dev
);
8509 static void bnx2x_netif_stop(struct bnx2x
*bp
)
8513 if (netif_running(bp
->dev
)) {
8514 netif_tx_disable(bp
->dev
);
8515 bp
->dev
->trans_start
= jiffies
; /* prevent tx timeout */
8516 for_each_queue(bp
, i
)
8517 napi_disable(&bnx2x_fp(bp
, i
, napi
));
8519 bnx2x_int_disable_sync(bp
);
8522 static void bnx2x_wait_for_link(struct bnx2x
*bp
, u8 link_up
)
8527 while (bnx2x_link_test(bp
) && cnt
--)
8531 static int bnx2x_run_loopback(struct bnx2x
*bp
, int loopback_mode
, u8 link_up
)
8533 unsigned int pkt_size
, num_pkts
, i
;
8534 struct sk_buff
*skb
;
8535 unsigned char *packet
;
8536 struct bnx2x_fastpath
*fp
= &bp
->fp
[0];
8537 u16 tx_start_idx
, tx_idx
;
8538 u16 rx_start_idx
, rx_idx
;
8540 struct sw_tx_bd
*tx_buf
;
8541 struct eth_tx_bd
*tx_bd
;
8543 union eth_rx_cqe
*cqe
;
8545 struct sw_rx_bd
*rx_buf
;
8549 if (loopback_mode
== BNX2X_MAC_LOOPBACK
) {
8550 bp
->link_params
.loopback_mode
= LOOPBACK_BMAC
;
8551 bnx2x_phy_hw_lock(bp
);
8552 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
8553 bnx2x_phy_hw_unlock(bp
);
8555 } else if (loopback_mode
== BNX2X_PHY_LOOPBACK
) {
8556 bp
->link_params
.loopback_mode
= LOOPBACK_XGXS_10
;
8557 bnx2x_phy_hw_lock(bp
);
8558 bnx2x_phy_init(&bp
->link_params
, &bp
->link_vars
);
8559 bnx2x_phy_hw_unlock(bp
);
8560 /* wait until link state is restored */
8561 bnx2x_wait_for_link(bp
, link_up
);
8567 skb
= netdev_alloc_skb(bp
->dev
, bp
->rx_buf_size
);
8570 goto test_loopback_exit
;
8572 packet
= skb_put(skb
, pkt_size
);
8573 memcpy(packet
, bp
->dev
->dev_addr
, ETH_ALEN
);
8574 memset(packet
+ ETH_ALEN
, 0, (ETH_HLEN
- ETH_ALEN
));
8575 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
8576 packet
[i
] = (unsigned char) (i
& 0xff);
8579 tx_start_idx
= le16_to_cpu(*fp
->tx_cons_sb
);
8580 rx_start_idx
= le16_to_cpu(*fp
->rx_cons_sb
);
8582 pkt_prod
= fp
->tx_pkt_prod
++;
8583 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
8584 tx_buf
->first_bd
= fp
->tx_bd_prod
;
8587 tx_bd
= &fp
->tx_desc_ring
[TX_BD(fp
->tx_bd_prod
)];
8588 mapping
= pci_map_single(bp
->pdev
, skb
->data
,
8589 skb_headlen(skb
), PCI_DMA_TODEVICE
);
8590 tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
8591 tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
8592 tx_bd
->nbd
= cpu_to_le16(1);
8593 tx_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
8594 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
8595 tx_bd
->bd_flags
.as_bitfield
= (ETH_TX_BD_FLAGS_START_BD
|
8596 ETH_TX_BD_FLAGS_END_BD
);
8597 tx_bd
->general_data
= ((UNICAST_ADDRESS
<<
8598 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT
) | 1);
8600 fp
->hw_tx_prods
->bds_prod
=
8601 cpu_to_le16(le16_to_cpu(fp
->hw_tx_prods
->bds_prod
) + 1);
8602 mb(); /* FW restriction: must not reorder writing nbd and packets */
8603 fp
->hw_tx_prods
->packets_prod
=
8604 cpu_to_le32(le32_to_cpu(fp
->hw_tx_prods
->packets_prod
) + 1);
8605 DOORBELL(bp
, FP_IDX(fp
), 0);
8611 bp
->dev
->trans_start
= jiffies
;
8615 tx_idx
= le16_to_cpu(*fp
->tx_cons_sb
);
8616 if (tx_idx
!= tx_start_idx
+ num_pkts
)
8617 goto test_loopback_exit
;
8619 rx_idx
= le16_to_cpu(*fp
->rx_cons_sb
);
8620 if (rx_idx
!= rx_start_idx
+ num_pkts
)
8621 goto test_loopback_exit
;
8623 cqe
= &fp
->rx_comp_ring
[RCQ_BD(fp
->rx_comp_cons
)];
8624 cqe_fp_flags
= cqe
->fast_path_cqe
.type_error_flags
;
8625 if (CQE_TYPE(cqe_fp_flags
) || (cqe_fp_flags
& ETH_RX_ERROR_FALGS
))
8626 goto test_loopback_rx_exit
;
8628 len
= le16_to_cpu(cqe
->fast_path_cqe
.pkt_len
);
8629 if (len
!= pkt_size
)
8630 goto test_loopback_rx_exit
;
8632 rx_buf
= &fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)];
8634 skb_reserve(skb
, cqe
->fast_path_cqe
.placement_offset
);
8635 for (i
= ETH_HLEN
; i
< pkt_size
; i
++)
8636 if (*(skb
->data
+ i
) != (unsigned char) (i
& 0xff))
8637 goto test_loopback_rx_exit
;
8641 test_loopback_rx_exit
:
8642 bp
->dev
->last_rx
= jiffies
;
8644 fp
->rx_bd_cons
= NEXT_RX_IDX(fp
->rx_bd_cons
);
8645 fp
->rx_bd_prod
= NEXT_RX_IDX(fp
->rx_bd_prod
);
8646 fp
->rx_comp_cons
= NEXT_RCQ_IDX(fp
->rx_comp_cons
);
8647 fp
->rx_comp_prod
= NEXT_RCQ_IDX(fp
->rx_comp_prod
);
8649 /* Update producers */
8650 bnx2x_update_rx_prod(bp
, fp
, fp
->rx_bd_prod
, fp
->rx_comp_prod
,
8652 mmiowb(); /* keep prod updates ordered */
8655 bp
->link_params
.loopback_mode
= LOOPBACK_NONE
;
8660 static int bnx2x_test_loopback(struct bnx2x
*bp
, u8 link_up
)
8664 if (!netif_running(bp
->dev
))
8665 return BNX2X_LOOPBACK_FAILED
;
8667 bnx2x_netif_stop(bp
);
8669 if (bnx2x_run_loopback(bp
, BNX2X_MAC_LOOPBACK
, link_up
)) {
8670 DP(NETIF_MSG_PROBE
, "MAC loopback failed\n");
8671 rc
|= BNX2X_MAC_LOOPBACK_FAILED
;
8674 if (bnx2x_run_loopback(bp
, BNX2X_PHY_LOOPBACK
, link_up
)) {
8675 DP(NETIF_MSG_PROBE
, "PHY loopback failed\n");
8676 rc
|= BNX2X_PHY_LOOPBACK_FAILED
;
8679 bnx2x_netif_start(bp
);
8684 #define CRC32_RESIDUAL 0xdebb20e3
8686 static int bnx2x_test_nvram(struct bnx2x
*bp
)
8688 static const struct {
8692 { 0, 0x14 }, /* bootstrap */
8693 { 0x14, 0xec }, /* dir */
8694 { 0x100, 0x350 }, /* manuf_info */
8695 { 0x450, 0xf0 }, /* feature_info */
8696 { 0x640, 0x64 }, /* upgrade_key_info */
8698 { 0x708, 0x70 }, /* manuf_key_info */
8703 u8
*data
= (u8
*)buf
;
8707 rc
= bnx2x_nvram_read(bp
, 0, data
, 4);
8709 DP(NETIF_MSG_PROBE
, "magic value read (rc -%d)\n", -rc
);
8710 goto test_nvram_exit
;
8713 magic
= be32_to_cpu(buf
[0]);
8714 if (magic
!= 0x669955aa) {
8715 DP(NETIF_MSG_PROBE
, "magic value (0x%08x)\n", magic
);
8717 goto test_nvram_exit
;
8720 for (i
= 0; nvram_tbl
[i
].size
; i
++) {
8722 rc
= bnx2x_nvram_read(bp
, nvram_tbl
[i
].offset
, data
,
8726 "nvram_tbl[%d] read data (rc -%d)\n", i
, -rc
);
8727 goto test_nvram_exit
;
8730 csum
= ether_crc_le(nvram_tbl
[i
].size
, data
);
8731 if (csum
!= CRC32_RESIDUAL
) {
8733 "nvram_tbl[%d] csum value (0x%08x)\n", i
, csum
);
8735 goto test_nvram_exit
;
8743 static int bnx2x_test_intr(struct bnx2x
*bp
)
8745 struct mac_configuration_cmd
*config
= bnx2x_sp(bp
, mac_config
);
8748 if (!netif_running(bp
->dev
))
8751 config
->hdr
.length_6b
= 0;
8752 config
->hdr
.offset
= 0;
8753 config
->hdr
.client_id
= BP_CL_ID(bp
);
8754 config
->hdr
.reserved1
= 0;
8756 rc
= bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
8757 U64_HI(bnx2x_sp_mapping(bp
, mac_config
)),
8758 U64_LO(bnx2x_sp_mapping(bp
, mac_config
)), 0);
8760 bp
->set_mac_pending
++;
8761 for (i
= 0; i
< 10; i
++) {
8762 if (!bp
->set_mac_pending
)
8764 msleep_interruptible(10);
8773 static void bnx2x_self_test(struct net_device
*dev
,
8774 struct ethtool_test
*etest
, u64
*buf
)
8776 struct bnx2x
*bp
= netdev_priv(dev
);
8778 memset(buf
, 0, sizeof(u64
) * BNX2X_NUM_TESTS
);
8780 if (!netif_running(dev
))
8783 /* offline tests are not suppoerted in MF mode */
8785 etest
->flags
&= ~ETH_TEST_FL_OFFLINE
;
8787 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
8790 link_up
= bp
->link_vars
.link_up
;
8791 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8792 bnx2x_nic_load(bp
, LOAD_DIAG
);
8793 /* wait until link state is restored */
8794 bnx2x_wait_for_link(bp
, link_up
);
8796 if (bnx2x_test_registers(bp
) != 0) {
8798 etest
->flags
|= ETH_TEST_FL_FAILED
;
8800 if (bnx2x_test_memory(bp
) != 0) {
8802 etest
->flags
|= ETH_TEST_FL_FAILED
;
8804 buf
[2] = bnx2x_test_loopback(bp
, link_up
);
8806 etest
->flags
|= ETH_TEST_FL_FAILED
;
8808 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
8809 bnx2x_nic_load(bp
, LOAD_NORMAL
);
8810 /* wait until link state is restored */
8811 bnx2x_wait_for_link(bp
, link_up
);
8813 if (bnx2x_test_nvram(bp
) != 0) {
8815 etest
->flags
|= ETH_TEST_FL_FAILED
;
8817 if (bnx2x_test_intr(bp
) != 0) {
8819 etest
->flags
|= ETH_TEST_FL_FAILED
;
8822 if (bnx2x_link_test(bp
) != 0) {
8824 etest
->flags
|= ETH_TEST_FL_FAILED
;
8826 buf
[7] = bnx2x_mc_assert(bp
);
8828 etest
->flags
|= ETH_TEST_FL_FAILED
;
8830 #ifdef BNX2X_EXTRA_DEBUG
8831 bnx2x_panic_dump(bp
);
8835 static const struct {
8839 char string
[ETH_GSTRING_LEN
];
8840 } bnx2x_stats_arr
[BNX2X_NUM_STATS
] = {
8841 /* 1 */ { STATS_OFFSET32(valid_bytes_received_hi
), 8, 1, "rx_bytes" },
8842 { STATS_OFFSET32(error_bytes_received_hi
), 8, 1, "rx_error_bytes" },
8843 { STATS_OFFSET32(total_bytes_transmitted_hi
), 8, 1, "tx_bytes" },
8844 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi
), 8, 0, "tx_error_bytes" },
8845 { STATS_OFFSET32(total_unicast_packets_received_hi
),
8846 8, 1, "rx_ucast_packets" },
8847 { STATS_OFFSET32(total_multicast_packets_received_hi
),
8848 8, 1, "rx_mcast_packets" },
8849 { STATS_OFFSET32(total_broadcast_packets_received_hi
),
8850 8, 1, "rx_bcast_packets" },
8851 { STATS_OFFSET32(total_unicast_packets_transmitted_hi
),
8852 8, 1, "tx_packets" },
8853 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi
),
8854 8, 0, "tx_mac_errors" },
8855 /* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi
),
8856 8, 0, "tx_carrier_errors" },
8857 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi
),
8858 8, 0, "rx_crc_errors" },
8859 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi
),
8860 8, 0, "rx_align_errors" },
8861 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi
),
8862 8, 0, "tx_single_collisions" },
8863 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi
),
8864 8, 0, "tx_multi_collisions" },
8865 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi
),
8866 8, 0, "tx_deferred" },
8867 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi
),
8868 8, 0, "tx_excess_collisions" },
8869 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi
),
8870 8, 0, "tx_late_collisions" },
8871 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi
),
8872 8, 0, "tx_total_collisions" },
8873 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi
),
8874 8, 0, "rx_fragments" },
8875 /* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi
), 8, 0, "rx_jabbers" },
8876 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi
),
8877 8, 0, "rx_undersize_packets" },
8878 { STATS_OFFSET32(jabber_packets_received
),
8879 4, 1, "rx_oversize_packets" },
8880 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi
),
8881 8, 0, "tx_64_byte_packets" },
8882 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi
),
8883 8, 0, "tx_65_to_127_byte_packets" },
8884 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi
),
8885 8, 0, "tx_128_to_255_byte_packets" },
8886 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi
),
8887 8, 0, "tx_256_to_511_byte_packets" },
8888 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi
),
8889 8, 0, "tx_512_to_1023_byte_packets" },
8890 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi
),
8891 8, 0, "tx_1024_to_1522_byte_packets" },
8892 { STATS_OFFSET32(etherstatspktsover1522octets_hi
),
8893 8, 0, "tx_1523_to_9022_byte_packets" },
8894 /* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi
),
8895 8, 0, "rx_xon_frames" },
8896 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi
),
8897 8, 0, "rx_xoff_frames" },
8898 { STATS_OFFSET32(tx_stat_outxonsent_hi
), 8, 0, "tx_xon_frames" },
8899 { STATS_OFFSET32(tx_stat_outxoffsent_hi
), 8, 0, "tx_xoff_frames" },
8900 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi
),
8901 8, 0, "rx_mac_ctrl_frames" },
8902 { STATS_OFFSET32(mac_filter_discard
), 4, 1, "rx_filtered_packets" },
8903 { STATS_OFFSET32(no_buff_discard
), 4, 1, "rx_discards" },
8904 { STATS_OFFSET32(xxoverflow_discard
), 4, 1, "rx_fw_discards" },
8905 { STATS_OFFSET32(brb_drop_hi
), 8, 1, "brb_discard" },
8906 /* 39 */{ STATS_OFFSET32(brb_truncate_discard
), 8, 1, "brb_truncate" }
8909 static void bnx2x_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
8911 struct bnx2x
*bp
= netdev_priv(dev
);
8914 switch (stringset
) {
8916 for (i
= 0, j
= 0; i
< BNX2X_NUM_STATS
; i
++) {
8917 if (IS_E1HMF(bp
) && (!bnx2x_stats_arr
[i
].flags
))
8919 strcpy(buf
+ j
*ETH_GSTRING_LEN
,
8920 bnx2x_stats_arr
[i
].string
);
8926 memcpy(buf
, bnx2x_tests_str_arr
, sizeof(bnx2x_tests_str_arr
));
8931 static int bnx2x_get_stats_count(struct net_device
*dev
)
8933 struct bnx2x
*bp
= netdev_priv(dev
);
8934 int i
, num_stats
= 0;
8936 for (i
= 0; i
< BNX2X_NUM_STATS
; i
++) {
8937 if (IS_E1HMF(bp
) && (!bnx2x_stats_arr
[i
].flags
))
8944 static void bnx2x_get_ethtool_stats(struct net_device
*dev
,
8945 struct ethtool_stats
*stats
, u64
*buf
)
8947 struct bnx2x
*bp
= netdev_priv(dev
);
8948 u32
*hw_stats
= (u32
*)&bp
->eth_stats
;
8951 for (i
= 0, j
= 0; i
< BNX2X_NUM_STATS
; i
++) {
8952 if (IS_E1HMF(bp
) && (!bnx2x_stats_arr
[i
].flags
))
8955 if (bnx2x_stats_arr
[i
].size
== 0) {
8956 /* skip this counter */
8961 if (bnx2x_stats_arr
[i
].size
== 4) {
8962 /* 4-byte counter */
8963 buf
[j
] = (u64
) *(hw_stats
+ bnx2x_stats_arr
[i
].offset
);
8967 /* 8-byte counter */
8968 buf
[j
] = HILO_U64(*(hw_stats
+ bnx2x_stats_arr
[i
].offset
),
8969 *(hw_stats
+ bnx2x_stats_arr
[i
].offset
+ 1));
8974 static int bnx2x_phys_id(struct net_device
*dev
, u32 data
)
8976 struct bnx2x
*bp
= netdev_priv(dev
);
8977 int port
= BP_PORT(bp
);
8980 if (!netif_running(dev
))
8989 for (i
= 0; i
< (data
* 2); i
++) {
8991 bnx2x_set_led(bp
, port
, LED_MODE_OPER
, SPEED_1000
,
8992 bp
->link_params
.hw_led_mode
,
8993 bp
->link_params
.chip_id
);
8995 bnx2x_set_led(bp
, port
, LED_MODE_OFF
, 0,
8996 bp
->link_params
.hw_led_mode
,
8997 bp
->link_params
.chip_id
);
8999 msleep_interruptible(500);
9000 if (signal_pending(current
))
9004 if (bp
->link_vars
.link_up
)
9005 bnx2x_set_led(bp
, port
, LED_MODE_OPER
,
9006 bp
->link_vars
.line_speed
,
9007 bp
->link_params
.hw_led_mode
,
9008 bp
->link_params
.chip_id
);
9013 static struct ethtool_ops bnx2x_ethtool_ops
= {
9014 .get_settings
= bnx2x_get_settings
,
9015 .set_settings
= bnx2x_set_settings
,
9016 .get_drvinfo
= bnx2x_get_drvinfo
,
9017 .get_wol
= bnx2x_get_wol
,
9018 .set_wol
= bnx2x_set_wol
,
9019 .get_msglevel
= bnx2x_get_msglevel
,
9020 .set_msglevel
= bnx2x_set_msglevel
,
9021 .nway_reset
= bnx2x_nway_reset
,
9022 .get_link
= ethtool_op_get_link
,
9023 .get_eeprom_len
= bnx2x_get_eeprom_len
,
9024 .get_eeprom
= bnx2x_get_eeprom
,
9025 .set_eeprom
= bnx2x_set_eeprom
,
9026 .get_coalesce
= bnx2x_get_coalesce
,
9027 .set_coalesce
= bnx2x_set_coalesce
,
9028 .get_ringparam
= bnx2x_get_ringparam
,
9029 .set_ringparam
= bnx2x_set_ringparam
,
9030 .get_pauseparam
= bnx2x_get_pauseparam
,
9031 .set_pauseparam
= bnx2x_set_pauseparam
,
9032 .get_rx_csum
= bnx2x_get_rx_csum
,
9033 .set_rx_csum
= bnx2x_set_rx_csum
,
9034 .get_tx_csum
= ethtool_op_get_tx_csum
,
9035 .set_tx_csum
= ethtool_op_set_tx_hw_csum
,
9036 .set_flags
= bnx2x_set_flags
,
9037 .get_flags
= ethtool_op_get_flags
,
9038 .get_sg
= ethtool_op_get_sg
,
9039 .set_sg
= ethtool_op_set_sg
,
9040 .get_tso
= ethtool_op_get_tso
,
9041 .set_tso
= bnx2x_set_tso
,
9042 .self_test_count
= bnx2x_self_test_count
,
9043 .self_test
= bnx2x_self_test
,
9044 .get_strings
= bnx2x_get_strings
,
9045 .phys_id
= bnx2x_phys_id
,
9046 .get_stats_count
= bnx2x_get_stats_count
,
9047 .get_ethtool_stats
= bnx2x_get_ethtool_stats
,
9050 /* end of ethtool_ops */
9052 /****************************************************************************
9053 * General service functions
9054 ****************************************************************************/
9056 static int bnx2x_set_power_state(struct bnx2x
*bp
, pci_power_t state
)
9060 pci_read_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
, &pmcsr
);
9064 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
9065 ((pmcsr
& ~PCI_PM_CTRL_STATE_MASK
) |
9066 PCI_PM_CTRL_PME_STATUS
));
9068 if (pmcsr
& PCI_PM_CTRL_STATE_MASK
)
9069 /* delay required during transition out of D3hot */
9074 pmcsr
&= ~PCI_PM_CTRL_STATE_MASK
;
9078 pmcsr
|= PCI_PM_CTRL_PME_ENABLE
;
9080 pci_write_config_word(bp
->pdev
, bp
->pm_cap
+ PCI_PM_CTRL
,
9083 /* No more memory access after this point until
9084 * device is brought back to D0.
9095 * net_device service functions
9098 static int bnx2x_poll(struct napi_struct
*napi
, int budget
)
9100 struct bnx2x_fastpath
*fp
= container_of(napi
, struct bnx2x_fastpath
,
9102 struct bnx2x
*bp
= fp
->bp
;
9105 #ifdef BNX2X_STOP_ON_ERROR
9106 if (unlikely(bp
->panic
))
9110 prefetch(fp
->tx_buf_ring
[TX_BD(fp
->tx_pkt_cons
)].skb
);
9111 prefetch(fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)].skb
);
9112 prefetch((char *)(fp
->rx_buf_ring
[RX_BD(fp
->rx_bd_cons
)].skb
) + 256);
9114 bnx2x_update_fpsb_idx(fp
);
9116 if ((fp
->tx_pkt_prod
!= le16_to_cpu(*fp
->tx_cons_sb
)) ||
9117 (fp
->tx_pkt_prod
!= fp
->tx_pkt_cons
))
9118 bnx2x_tx_int(fp
, budget
);
9120 if (le16_to_cpu(*fp
->rx_cons_sb
) != fp
->rx_comp_cons
)
9121 work_done
= bnx2x_rx_int(fp
, budget
);
9123 rmb(); /* bnx2x_has_work() reads the status block */
9125 /* must not complete if we consumed full budget */
9126 if ((work_done
< budget
) && !bnx2x_has_work(fp
)) {
9128 #ifdef BNX2X_STOP_ON_ERROR
9131 netif_rx_complete(bp
->dev
, napi
);
9133 bnx2x_ack_sb(bp
, FP_SB_ID(fp
), USTORM_ID
,
9134 le16_to_cpu(fp
->fp_u_idx
), IGU_INT_NOP
, 1);
9135 bnx2x_ack_sb(bp
, FP_SB_ID(fp
), CSTORM_ID
,
9136 le16_to_cpu(fp
->fp_c_idx
), IGU_INT_ENABLE
, 1);
9142 /* we split the first BD into headers and data BDs
9143 * to ease the pain of our fellow micocode engineers
9144 * we use one mapping for both BDs
9145 * So far this has only been observed to happen
9146 * in Other Operating Systems(TM)
9148 static noinline u16
bnx2x_tx_split(struct bnx2x
*bp
,
9149 struct bnx2x_fastpath
*fp
,
9150 struct eth_tx_bd
**tx_bd
, u16 hlen
,
9151 u16 bd_prod
, int nbd
)
9153 struct eth_tx_bd
*h_tx_bd
= *tx_bd
;
9154 struct eth_tx_bd
*d_tx_bd
;
9156 int old_len
= le16_to_cpu(h_tx_bd
->nbytes
);
9158 /* first fix first BD */
9159 h_tx_bd
->nbd
= cpu_to_le16(nbd
);
9160 h_tx_bd
->nbytes
= cpu_to_le16(hlen
);
9162 DP(NETIF_MSG_TX_QUEUED
, "TSO split header size is %d "
9163 "(%x:%x) nbd %d\n", h_tx_bd
->nbytes
, h_tx_bd
->addr_hi
,
9164 h_tx_bd
->addr_lo
, h_tx_bd
->nbd
);
9166 /* now get a new data BD
9167 * (after the pbd) and fill it */
9168 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9169 d_tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
9171 mapping
= HILO_U64(le32_to_cpu(h_tx_bd
->addr_hi
),
9172 le32_to_cpu(h_tx_bd
->addr_lo
)) + hlen
;
9174 d_tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9175 d_tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9176 d_tx_bd
->nbytes
= cpu_to_le16(old_len
- hlen
);
9178 /* this marks the BD as one that has no individual mapping
9179 * the FW ignores this flag in a BD not marked start
9181 d_tx_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_SW_LSO
;
9182 DP(NETIF_MSG_TX_QUEUED
,
9183 "TSO split data size is %d (%x:%x)\n",
9184 d_tx_bd
->nbytes
, d_tx_bd
->addr_hi
, d_tx_bd
->addr_lo
);
9186 /* update tx_bd for marking the last BD flag */
9192 static inline u16
bnx2x_csum_fix(unsigned char *t_header
, u16 csum
, s8 fix
)
9195 csum
= (u16
) ~csum_fold(csum_sub(csum
,
9196 csum_partial(t_header
- fix
, fix
, 0)));
9199 csum
= (u16
) ~csum_fold(csum_add(csum
,
9200 csum_partial(t_header
, -fix
, 0)));
9202 return swab16(csum
);
9205 static inline u32
bnx2x_xmit_type(struct bnx2x
*bp
, struct sk_buff
*skb
)
9209 if (skb
->ip_summed
!= CHECKSUM_PARTIAL
)
9213 if (skb
->protocol
== ntohs(ETH_P_IPV6
)) {
9215 if (ipv6_hdr(skb
)->nexthdr
== IPPROTO_TCP
)
9216 rc
|= XMIT_CSUM_TCP
;
9220 if (ip_hdr(skb
)->protocol
== IPPROTO_TCP
)
9221 rc
|= XMIT_CSUM_TCP
;
9225 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV4
)
9228 else if (skb_shinfo(skb
)->gso_type
& SKB_GSO_TCPV6
)
9234 /* check if packet requires linearization (packet is too fragmented) */
9235 static int bnx2x_pkt_req_lin(struct bnx2x
*bp
, struct sk_buff
*skb
,
9240 int first_bd_sz
= 0;
9242 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9243 if (skb_shinfo(skb
)->nr_frags
>= (MAX_FETCH_BD
- 3)) {
9245 if (xmit_type
& XMIT_GSO
) {
9246 unsigned short lso_mss
= skb_shinfo(skb
)->gso_size
;
9247 /* Check if LSO packet needs to be copied:
9248 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9249 int wnd_size
= MAX_FETCH_BD
- 3;
9250 /* Number of widnows to check */
9251 int num_wnds
= skb_shinfo(skb
)->nr_frags
- wnd_size
;
9256 /* Headers length */
9257 hlen
= (int)(skb_transport_header(skb
) - skb
->data
) +
9260 /* Amount of data (w/o headers) on linear part of SKB*/
9261 first_bd_sz
= skb_headlen(skb
) - hlen
;
9263 wnd_sum
= first_bd_sz
;
9265 /* Calculate the first sum - it's special */
9266 for (frag_idx
= 0; frag_idx
< wnd_size
- 1; frag_idx
++)
9268 skb_shinfo(skb
)->frags
[frag_idx
].size
;
9270 /* If there was data on linear skb data - check it */
9271 if (first_bd_sz
> 0) {
9272 if (unlikely(wnd_sum
< lso_mss
)) {
9277 wnd_sum
-= first_bd_sz
;
9280 /* Others are easier: run through the frag list and
9281 check all windows */
9282 for (wnd_idx
= 0; wnd_idx
<= num_wnds
; wnd_idx
++) {
9284 skb_shinfo(skb
)->frags
[wnd_idx
+ wnd_size
- 1].size
;
9286 if (unlikely(wnd_sum
< lso_mss
)) {
9291 skb_shinfo(skb
)->frags
[wnd_idx
].size
;
9295 /* in non-LSO too fragmented packet should always
9302 if (unlikely(to_copy
))
9303 DP(NETIF_MSG_TX_QUEUED
,
9304 "Linearization IS REQUIRED for %s packet. "
9305 "num_frags %d hlen %d first_bd_sz %d\n",
9306 (xmit_type
& XMIT_GSO
) ? "LSO" : "non-LSO",
9307 skb_shinfo(skb
)->nr_frags
, hlen
, first_bd_sz
);
9312 /* called with netif_tx_lock
9313 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9314 * netif_wake_queue()
9316 static int bnx2x_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
9318 struct bnx2x
*bp
= netdev_priv(dev
);
9319 struct bnx2x_fastpath
*fp
;
9320 struct sw_tx_bd
*tx_buf
;
9321 struct eth_tx_bd
*tx_bd
;
9322 struct eth_tx_parse_bd
*pbd
= NULL
;
9323 u16 pkt_prod
, bd_prod
;
9326 u32 xmit_type
= bnx2x_xmit_type(bp
, skb
);
9327 int vlan_off
= (bp
->e1hov
? 4 : 0);
9331 #ifdef BNX2X_STOP_ON_ERROR
9332 if (unlikely(bp
->panic
))
9333 return NETDEV_TX_BUSY
;
9336 fp_index
= (smp_processor_id() % bp
->num_queues
);
9337 fp
= &bp
->fp
[fp_index
];
9339 if (unlikely(bnx2x_tx_avail(bp
->fp
) <
9340 (skb_shinfo(skb
)->nr_frags
+ 3))) {
9341 bp
->eth_stats
.driver_xoff
++,
9342 netif_stop_queue(dev
);
9343 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9344 return NETDEV_TX_BUSY
;
9347 DP(NETIF_MSG_TX_QUEUED
, "SKB: summed %x protocol %x protocol(%x,%x)"
9348 " gso type %x xmit_type %x\n",
9349 skb
->ip_summed
, skb
->protocol
, ipv6_hdr(skb
)->nexthdr
,
9350 ip_hdr(skb
)->protocol
, skb_shinfo(skb
)->gso_type
, xmit_type
);
9352 /* First, check if we need to linearaize the skb
9353 (due to FW restrictions) */
9354 if (bnx2x_pkt_req_lin(bp
, skb
, xmit_type
)) {
9355 /* Statistics of linearization */
9357 if (skb_linearize(skb
) != 0) {
9358 DP(NETIF_MSG_TX_QUEUED
, "SKB linearization failed - "
9359 "silently dropping this SKB\n");
9360 dev_kfree_skb_any(skb
);
9366 Please read carefully. First we use one BD which we mark as start,
9367 then for TSO or xsum we have a parsing info BD,
9368 and only then we have the rest of the TSO BDs.
9369 (don't forget to mark the last one as last,
9370 and to unmap only AFTER you write to the BD ...)
9371 And above all, all pdb sizes are in words - NOT DWORDS!
9374 pkt_prod
= fp
->tx_pkt_prod
++;
9375 bd_prod
= TX_BD(fp
->tx_bd_prod
);
9377 /* get a tx_buf and first BD */
9378 tx_buf
= &fp
->tx_buf_ring
[TX_BD(pkt_prod
)];
9379 tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
9381 tx_bd
->bd_flags
.as_bitfield
= ETH_TX_BD_FLAGS_START_BD
;
9382 tx_bd
->general_data
= (UNICAST_ADDRESS
<<
9383 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT
);
9384 tx_bd
->general_data
|= 1; /* header nbd */
9386 /* remember the first BD of the packet */
9387 tx_buf
->first_bd
= fp
->tx_bd_prod
;
9390 DP(NETIF_MSG_TX_QUEUED
,
9391 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9392 pkt_prod
, tx_buf
, fp
->tx_pkt_prod
, bd_prod
, tx_bd
);
9394 if ((bp
->vlgrp
!= NULL
) && vlan_tx_tag_present(skb
)) {
9395 tx_bd
->vlan
= cpu_to_le16(vlan_tx_tag_get(skb
));
9396 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_VLAN_TAG
;
9399 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
9403 /* turn on parsing and get a BD */
9404 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9405 pbd
= (void *)&fp
->tx_desc_ring
[bd_prod
];
9407 memset(pbd
, 0, sizeof(struct eth_tx_parse_bd
));
9410 if (xmit_type
& XMIT_CSUM
) {
9411 hlen
= (skb_network_header(skb
) - skb
->data
+ vlan_off
) / 2;
9413 /* for now NS flag is not used in Linux */
9414 pbd
->global_data
= (hlen
|
9415 ((skb
->protocol
== ntohs(ETH_P_8021Q
)) <<
9416 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT
));
9418 pbd
->ip_hlen
= (skb_transport_header(skb
) -
9419 skb_network_header(skb
)) / 2;
9421 hlen
+= pbd
->ip_hlen
+ tcp_hdrlen(skb
) / 2;
9423 pbd
->total_hlen
= cpu_to_le16(hlen
);
9424 hlen
= hlen
*2 - vlan_off
;
9426 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_TCP_CSUM
;
9428 if (xmit_type
& XMIT_CSUM_V4
)
9429 tx_bd
->bd_flags
.as_bitfield
|=
9430 ETH_TX_BD_FLAGS_IP_CSUM
;
9432 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_IPV6
;
9434 if (xmit_type
& XMIT_CSUM_TCP
) {
9435 pbd
->tcp_pseudo_csum
= swab16(tcp_hdr(skb
)->check
);
9438 s8 fix
= SKB_CS_OFF(skb
); /* signed! */
9440 pbd
->global_data
|= ETH_TX_PARSE_BD_CS_ANY_FLG
;
9441 pbd
->cs_offset
= fix
/ 2;
9443 DP(NETIF_MSG_TX_QUEUED
,
9444 "hlen %d offset %d fix %d csum before fix %x\n",
9445 le16_to_cpu(pbd
->total_hlen
), pbd
->cs_offset
, fix
,
9448 /* HW bug: fixup the CSUM */
9449 pbd
->tcp_pseudo_csum
=
9450 bnx2x_csum_fix(skb_transport_header(skb
),
9453 DP(NETIF_MSG_TX_QUEUED
, "csum after fix %x\n",
9454 pbd
->tcp_pseudo_csum
);
9458 mapping
= pci_map_single(bp
->pdev
, skb
->data
,
9459 skb_headlen(skb
), PCI_DMA_TODEVICE
);
9461 tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9462 tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9463 nbd
= skb_shinfo(skb
)->nr_frags
+ ((pbd
== NULL
)? 1 : 2);
9464 tx_bd
->nbd
= cpu_to_le16(nbd
);
9465 tx_bd
->nbytes
= cpu_to_le16(skb_headlen(skb
));
9467 DP(NETIF_MSG_TX_QUEUED
, "first bd @%p addr (%x:%x) nbd %d"
9468 " nbytes %d flags %x vlan %x\n",
9469 tx_bd
, tx_bd
->addr_hi
, tx_bd
->addr_lo
, le16_to_cpu(tx_bd
->nbd
),
9470 le16_to_cpu(tx_bd
->nbytes
), tx_bd
->bd_flags
.as_bitfield
,
9471 le16_to_cpu(tx_bd
->vlan
));
9473 if (xmit_type
& XMIT_GSO
) {
9475 DP(NETIF_MSG_TX_QUEUED
,
9476 "TSO packet len %d hlen %d total len %d tso size %d\n",
9477 skb
->len
, hlen
, skb_headlen(skb
),
9478 skb_shinfo(skb
)->gso_size
);
9480 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_SW_LSO
;
9482 if (unlikely(skb_headlen(skb
) > hlen
))
9483 bd_prod
= bnx2x_tx_split(bp
, fp
, &tx_bd
, hlen
,
9486 pbd
->lso_mss
= cpu_to_le16(skb_shinfo(skb
)->gso_size
);
9487 pbd
->tcp_send_seq
= swab32(tcp_hdr(skb
)->seq
);
9488 pbd
->tcp_flags
= pbd_tcp_flags(skb
);
9490 if (xmit_type
& XMIT_GSO_V4
) {
9491 pbd
->ip_id
= swab16(ip_hdr(skb
)->id
);
9492 pbd
->tcp_pseudo_csum
=
9493 swab16(~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
9495 0, IPPROTO_TCP
, 0));
9498 pbd
->tcp_pseudo_csum
=
9499 swab16(~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
9500 &ipv6_hdr(skb
)->daddr
,
9501 0, IPPROTO_TCP
, 0));
9503 pbd
->global_data
|= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN
;
9506 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
9507 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
9509 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9510 tx_bd
= &fp
->tx_desc_ring
[bd_prod
];
9512 mapping
= pci_map_page(bp
->pdev
, frag
->page
, frag
->page_offset
,
9513 frag
->size
, PCI_DMA_TODEVICE
);
9515 tx_bd
->addr_hi
= cpu_to_le32(U64_HI(mapping
));
9516 tx_bd
->addr_lo
= cpu_to_le32(U64_LO(mapping
));
9517 tx_bd
->nbytes
= cpu_to_le16(frag
->size
);
9518 tx_bd
->vlan
= cpu_to_le16(pkt_prod
);
9519 tx_bd
->bd_flags
.as_bitfield
= 0;
9521 DP(NETIF_MSG_TX_QUEUED
,
9522 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9523 i
, tx_bd
, tx_bd
->addr_hi
, tx_bd
->addr_lo
,
9524 le16_to_cpu(tx_bd
->nbytes
), tx_bd
->bd_flags
.as_bitfield
);
9527 /* now at last mark the BD as the last BD */
9528 tx_bd
->bd_flags
.as_bitfield
|= ETH_TX_BD_FLAGS_END_BD
;
9530 DP(NETIF_MSG_TX_QUEUED
, "last bd @%p flags %x\n",
9531 tx_bd
, tx_bd
->bd_flags
.as_bitfield
);
9533 bd_prod
= TX_BD(NEXT_TX_IDX(bd_prod
));
9535 /* now send a tx doorbell, counting the next BD
9536 * if the packet contains or ends with it
9538 if (TX_BD_POFF(bd_prod
) < nbd
)
9542 DP(NETIF_MSG_TX_QUEUED
,
9543 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9544 " tcp_flags %x xsum %x seq %u hlen %u\n",
9545 pbd
, pbd
->global_data
, pbd
->ip_hlen
, pbd
->ip_id
,
9546 pbd
->lso_mss
, pbd
->tcp_flags
, pbd
->tcp_pseudo_csum
,
9547 pbd
->tcp_send_seq
, le16_to_cpu(pbd
->total_hlen
));
9549 DP(NETIF_MSG_TX_QUEUED
, "doorbell: nbd %d bd %u\n", nbd
, bd_prod
);
9551 fp
->hw_tx_prods
->bds_prod
=
9552 cpu_to_le16(le16_to_cpu(fp
->hw_tx_prods
->bds_prod
) + nbd
);
9553 mb(); /* FW restriction: must not reorder writing nbd and packets */
9554 fp
->hw_tx_prods
->packets_prod
=
9555 cpu_to_le32(le32_to_cpu(fp
->hw_tx_prods
->packets_prod
) + 1);
9556 DOORBELL(bp
, FP_IDX(fp
), 0);
9560 fp
->tx_bd_prod
+= nbd
;
9561 dev
->trans_start
= jiffies
;
9563 if (unlikely(bnx2x_tx_avail(fp
) < MAX_SKB_FRAGS
+ 3)) {
9564 netif_stop_queue(dev
);
9565 bp
->eth_stats
.driver_xoff
++;
9566 if (bnx2x_tx_avail(fp
) >= MAX_SKB_FRAGS
+ 3)
9567 netif_wake_queue(dev
);
9571 return NETDEV_TX_OK
;
9574 /* called with rtnl_lock */
9575 static int bnx2x_open(struct net_device
*dev
)
9577 struct bnx2x
*bp
= netdev_priv(dev
);
9579 bnx2x_set_power_state(bp
, PCI_D0
);
9581 return bnx2x_nic_load(bp
, LOAD_OPEN
);
9584 /* called with rtnl_lock */
9585 static int bnx2x_close(struct net_device
*dev
)
9587 struct bnx2x
*bp
= netdev_priv(dev
);
9589 /* Unload the driver, release IRQs */
9590 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
9591 if (atomic_read(&bp
->pdev
->enable_cnt
) == 1)
9592 if (!CHIP_REV_IS_SLOW(bp
))
9593 bnx2x_set_power_state(bp
, PCI_D3hot
);
9598 /* called with netif_tx_lock from set_multicast */
9599 static void bnx2x_set_rx_mode(struct net_device
*dev
)
9601 struct bnx2x
*bp
= netdev_priv(dev
);
9602 u32 rx_mode
= BNX2X_RX_MODE_NORMAL
;
9603 int port
= BP_PORT(bp
);
9605 if (bp
->state
!= BNX2X_STATE_OPEN
) {
9606 DP(NETIF_MSG_IFUP
, "state is %x, returning\n", bp
->state
);
9610 DP(NETIF_MSG_IFUP
, "dev->flags = %x\n", dev
->flags
);
9612 if (dev
->flags
& IFF_PROMISC
)
9613 rx_mode
= BNX2X_RX_MODE_PROMISC
;
9615 else if ((dev
->flags
& IFF_ALLMULTI
) ||
9616 ((dev
->mc_count
> BNX2X_MAX_MULTICAST
) && CHIP_IS_E1(bp
)))
9617 rx_mode
= BNX2X_RX_MODE_ALLMULTI
;
9619 else { /* some multicasts */
9620 if (CHIP_IS_E1(bp
)) {
9622 struct dev_mc_list
*mclist
;
9623 struct mac_configuration_cmd
*config
=
9624 bnx2x_sp(bp
, mcast_config
);
9626 for (i
= 0, mclist
= dev
->mc_list
;
9627 mclist
&& (i
< dev
->mc_count
);
9628 i
++, mclist
= mclist
->next
) {
9630 config
->config_table
[i
].
9631 cam_entry
.msb_mac_addr
=
9632 swab16(*(u16
*)&mclist
->dmi_addr
[0]);
9633 config
->config_table
[i
].
9634 cam_entry
.middle_mac_addr
=
9635 swab16(*(u16
*)&mclist
->dmi_addr
[2]);
9636 config
->config_table
[i
].
9637 cam_entry
.lsb_mac_addr
=
9638 swab16(*(u16
*)&mclist
->dmi_addr
[4]);
9639 config
->config_table
[i
].cam_entry
.flags
=
9641 config
->config_table
[i
].
9642 target_table_entry
.flags
= 0;
9643 config
->config_table
[i
].
9644 target_table_entry
.client_id
= 0;
9645 config
->config_table
[i
].
9646 target_table_entry
.vlan_id
= 0;
9649 "setting MCAST[%d] (%04x:%04x:%04x)\n", i
,
9650 config
->config_table
[i
].
9651 cam_entry
.msb_mac_addr
,
9652 config
->config_table
[i
].
9653 cam_entry
.middle_mac_addr
,
9654 config
->config_table
[i
].
9655 cam_entry
.lsb_mac_addr
);
9657 old
= config
->hdr
.length_6b
;
9659 for (; i
< old
; i
++) {
9660 if (CAM_IS_INVALID(config
->
9662 i
--; /* already invalidated */
9666 CAM_INVALIDATE(config
->
9671 if (CHIP_REV_IS_SLOW(bp
))
9672 offset
= BNX2X_MAX_EMUL_MULTI
*(1 + port
);
9674 offset
= BNX2X_MAX_MULTICAST
*(1 + port
);
9676 config
->hdr
.length_6b
= i
;
9677 config
->hdr
.offset
= offset
;
9678 config
->hdr
.client_id
= BP_CL_ID(bp
);
9679 config
->hdr
.reserved1
= 0;
9681 bnx2x_sp_post(bp
, RAMROD_CMD_ID_ETH_SET_MAC
, 0,
9682 U64_HI(bnx2x_sp_mapping(bp
, mcast_config
)),
9683 U64_LO(bnx2x_sp_mapping(bp
, mcast_config
)),
9686 /* Accept one or more multicasts */
9687 struct dev_mc_list
*mclist
;
9688 u32 mc_filter
[MC_HASH_SIZE
];
9689 u32 crc
, bit
, regidx
;
9692 memset(mc_filter
, 0, 4 * MC_HASH_SIZE
);
9694 for (i
= 0, mclist
= dev
->mc_list
;
9695 mclist
&& (i
< dev
->mc_count
);
9696 i
++, mclist
= mclist
->next
) {
9698 DP(NETIF_MSG_IFUP
, "Adding mcast MAC: "
9699 "%02x:%02x:%02x:%02x:%02x:%02x\n",
9700 mclist
->dmi_addr
[0], mclist
->dmi_addr
[1],
9701 mclist
->dmi_addr
[2], mclist
->dmi_addr
[3],
9702 mclist
->dmi_addr
[4], mclist
->dmi_addr
[5]);
9704 crc
= crc32c_le(0, mclist
->dmi_addr
, ETH_ALEN
);
9705 bit
= (crc
>> 24) & 0xff;
9708 mc_filter
[regidx
] |= (1 << bit
);
9711 for (i
= 0; i
< MC_HASH_SIZE
; i
++)
9712 REG_WR(bp
, MC_HASH_OFFSET(bp
, i
),
9717 bp
->rx_mode
= rx_mode
;
9718 bnx2x_set_storm_rx_mode(bp
);
9721 /* called with rtnl_lock */
9722 static int bnx2x_change_mac_addr(struct net_device
*dev
, void *p
)
9724 struct sockaddr
*addr
= p
;
9725 struct bnx2x
*bp
= netdev_priv(dev
);
9727 if (!is_valid_ether_addr((u8
*)(addr
->sa_data
)))
9730 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
9731 if (netif_running(dev
)) {
9733 bnx2x_set_mac_addr_e1(bp
);
9735 bnx2x_set_mac_addr_e1h(bp
);
9741 /* called with rtnl_lock */
9742 static int bnx2x_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
9744 struct mii_ioctl_data
*data
= if_mii(ifr
);
9745 struct bnx2x
*bp
= netdev_priv(dev
);
9750 data
->phy_id
= bp
->port
.phy_addr
;
9757 if (!netif_running(dev
))
9760 mutex_lock(&bp
->port
.phy_mutex
);
9761 err
= bnx2x_cl45_read(bp
, BP_PORT(bp
), 0, bp
->port
.phy_addr
,
9762 DEFAULT_PHY_DEV_ADDR
,
9763 (data
->reg_num
& 0x1f), &mii_regval
);
9764 data
->val_out
= mii_regval
;
9765 mutex_unlock(&bp
->port
.phy_mutex
);
9770 if (!capable(CAP_NET_ADMIN
))
9773 if (!netif_running(dev
))
9776 mutex_lock(&bp
->port
.phy_mutex
);
9777 err
= bnx2x_cl45_write(bp
, BP_PORT(bp
), 0, bp
->port
.phy_addr
,
9778 DEFAULT_PHY_DEV_ADDR
,
9779 (data
->reg_num
& 0x1f), data
->val_in
);
9780 mutex_unlock(&bp
->port
.phy_mutex
);
9791 /* called with rtnl_lock */
9792 static int bnx2x_change_mtu(struct net_device
*dev
, int new_mtu
)
9794 struct bnx2x
*bp
= netdev_priv(dev
);
9797 if ((new_mtu
> ETH_MAX_JUMBO_PACKET_SIZE
) ||
9798 ((new_mtu
+ ETH_HLEN
) < ETH_MIN_PACKET_SIZE
))
9801 /* This does not race with packet allocation
9802 * because the actual alloc size is
9803 * only updated as part of load
9807 if (netif_running(dev
)) {
9808 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
9809 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
9815 static void bnx2x_tx_timeout(struct net_device
*dev
)
9817 struct bnx2x
*bp
= netdev_priv(dev
);
9819 #ifdef BNX2X_STOP_ON_ERROR
9823 /* This allows the netif to be shutdown gracefully before resetting */
9824 schedule_work(&bp
->reset_task
);
9828 /* called with rtnl_lock */
9829 static void bnx2x_vlan_rx_register(struct net_device
*dev
,
9830 struct vlan_group
*vlgrp
)
9832 struct bnx2x
*bp
= netdev_priv(dev
);
9835 if (netif_running(dev
))
9836 bnx2x_set_client_config(bp
);
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the device IRQ masked so
 * netconsole/kgdb can make progress without interrupt delivery.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
9852 static int __devinit
bnx2x_init_dev(struct pci_dev
*pdev
,
9853 struct net_device
*dev
)
9858 SET_NETDEV_DEV(dev
, &pdev
->dev
);
9859 bp
= netdev_priv(dev
);
9864 bp
->func
= PCI_FUNC(pdev
->devfn
);
9866 rc
= pci_enable_device(pdev
);
9868 printk(KERN_ERR PFX
"Cannot enable PCI device, aborting\n");
9872 if (!(pci_resource_flags(pdev
, 0) & IORESOURCE_MEM
)) {
9873 printk(KERN_ERR PFX
"Cannot find PCI device base address,"
9876 goto err_out_disable
;
9879 if (!(pci_resource_flags(pdev
, 2) & IORESOURCE_MEM
)) {
9880 printk(KERN_ERR PFX
"Cannot find second PCI device"
9881 " base address, aborting\n");
9883 goto err_out_disable
;
9886 if (atomic_read(&pdev
->enable_cnt
) == 1) {
9887 rc
= pci_request_regions(pdev
, DRV_MODULE_NAME
);
9889 printk(KERN_ERR PFX
"Cannot obtain PCI resources,"
9891 goto err_out_disable
;
9894 pci_set_master(pdev
);
9895 pci_save_state(pdev
);
9898 bp
->pm_cap
= pci_find_capability(pdev
, PCI_CAP_ID_PM
);
9899 if (bp
->pm_cap
== 0) {
9900 printk(KERN_ERR PFX
"Cannot find power management"
9901 " capability, aborting\n");
9903 goto err_out_release
;
9906 bp
->pcie_cap
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
9907 if (bp
->pcie_cap
== 0) {
9908 printk(KERN_ERR PFX
"Cannot find PCI Express capability,"
9911 goto err_out_release
;
9914 if (pci_set_dma_mask(pdev
, DMA_64BIT_MASK
) == 0) {
9915 bp
->flags
|= USING_DAC_FLAG
;
9916 if (pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
) != 0) {
9917 printk(KERN_ERR PFX
"pci_set_consistent_dma_mask"
9918 " failed, aborting\n");
9920 goto err_out_release
;
9923 } else if (pci_set_dma_mask(pdev
, DMA_32BIT_MASK
) != 0) {
9924 printk(KERN_ERR PFX
"System does not support DMA,"
9927 goto err_out_release
;
9930 dev
->mem_start
= pci_resource_start(pdev
, 0);
9931 dev
->base_addr
= dev
->mem_start
;
9932 dev
->mem_end
= pci_resource_end(pdev
, 0);
9934 dev
->irq
= pdev
->irq
;
9936 bp
->regview
= ioremap_nocache(dev
->base_addr
,
9937 pci_resource_len(pdev
, 0));
9939 printk(KERN_ERR PFX
"Cannot map register space, aborting\n");
9941 goto err_out_release
;
9944 bp
->doorbells
= ioremap_nocache(pci_resource_start(pdev
, 2),
9945 min_t(u64
, BNX2X_DB_SIZE
,
9946 pci_resource_len(pdev
, 2)));
9947 if (!bp
->doorbells
) {
9948 printk(KERN_ERR PFX
"Cannot map doorbell space, aborting\n");
9953 bnx2x_set_power_state(bp
, PCI_D0
);
9955 /* clean indirect addresses */
9956 pci_write_config_dword(bp
->pdev
, PCICFG_GRC_ADDRESS
,
9957 PCICFG_VENDOR_ID_OFFSET
);
9958 REG_WR(bp
, PXP2_REG_PGL_ADDR_88_F0
+ BP_PORT(bp
)*16, 0);
9959 REG_WR(bp
, PXP2_REG_PGL_ADDR_8C_F0
+ BP_PORT(bp
)*16, 0);
9960 REG_WR(bp
, PXP2_REG_PGL_ADDR_90_F0
+ BP_PORT(bp
)*16, 0);
9961 REG_WR(bp
, PXP2_REG_PGL_ADDR_94_F0
+ BP_PORT(bp
)*16, 0);
9963 dev
->hard_start_xmit
= bnx2x_start_xmit
;
9964 dev
->watchdog_timeo
= TX_TIMEOUT
;
9966 dev
->ethtool_ops
= &bnx2x_ethtool_ops
;
9967 dev
->open
= bnx2x_open
;
9968 dev
->stop
= bnx2x_close
;
9969 dev
->set_multicast_list
= bnx2x_set_rx_mode
;
9970 dev
->set_mac_address
= bnx2x_change_mac_addr
;
9971 dev
->do_ioctl
= bnx2x_ioctl
;
9972 dev
->change_mtu
= bnx2x_change_mtu
;
9973 dev
->tx_timeout
= bnx2x_tx_timeout
;
9975 dev
->vlan_rx_register
= bnx2x_vlan_rx_register
;
9977 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
9978 dev
->poll_controller
= poll_bnx2x
;
9980 dev
->features
|= NETIF_F_SG
;
9981 dev
->features
|= NETIF_F_HW_CSUM
;
9982 if (bp
->flags
& USING_DAC_FLAG
)
9983 dev
->features
|= NETIF_F_HIGHDMA
;
9985 dev
->features
|= (NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX
);
9987 dev
->features
|= (NETIF_F_TSO
| NETIF_F_TSO_ECN
);
9988 dev
->features
|= NETIF_F_TSO6
;
9994 iounmap(bp
->regview
);
9997 if (bp
->doorbells
) {
9998 iounmap(bp
->doorbells
);
9999 bp
->doorbells
= NULL
;
10003 if (atomic_read(&pdev
->enable_cnt
) == 1)
10004 pci_release_regions(pdev
);
10007 pci_disable_device(pdev
);
10008 pci_set_drvdata(pdev
, NULL
);
10014 static int __devinit
bnx2x_get_pcie_width(struct bnx2x
*bp
)
10016 u32 val
= REG_RD(bp
, PCICFG_OFFSET
+ PCICFG_LINK_CONTROL
);
10018 val
= (val
& PCICFG_LINK_WIDTH
) >> PCICFG_LINK_WIDTH_SHIFT
;
10022 /* return value of 1=2.5GHz 2=5GHz */
10023 static int __devinit
bnx2x_get_pcie_speed(struct bnx2x
*bp
)
10025 u32 val
= REG_RD(bp
, PCICFG_OFFSET
+ PCICFG_LINK_CONTROL
);
10027 val
= (val
& PCICFG_LINK_SPEED
) >> PCICFG_LINK_SPEED_SHIFT
;
10031 static int __devinit
bnx2x_init_one(struct pci_dev
*pdev
,
10032 const struct pci_device_id
*ent
)
10034 static int version_printed
;
10035 struct net_device
*dev
= NULL
;
10038 DECLARE_MAC_BUF(mac
);
10040 if (version_printed
++ == 0)
10041 printk(KERN_INFO
"%s", version
);
10043 /* dev zeroed in init_etherdev */
10044 dev
= alloc_etherdev(sizeof(*bp
));
10046 printk(KERN_ERR PFX
"Cannot allocate net device\n");
10050 netif_carrier_off(dev
);
10052 bp
= netdev_priv(dev
);
10053 bp
->msglevel
= debug
;
10055 rc
= bnx2x_init_dev(pdev
, dev
);
10061 rc
= register_netdev(dev
);
10063 dev_err(&pdev
->dev
, "Cannot register net device\n");
10064 goto init_one_exit
;
10067 pci_set_drvdata(pdev
, dev
);
10069 rc
= bnx2x_init_bp(bp
);
10071 unregister_netdev(dev
);
10072 goto init_one_exit
;
10075 bp
->common
.name
= board_info
[ent
->driver_data
].name
;
10076 printk(KERN_INFO
"%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10077 " IRQ %d, ", dev
->name
, bp
->common
.name
,
10078 (CHIP_REV(bp
) >> 12) + 'A', (CHIP_METAL(bp
) >> 4),
10079 bnx2x_get_pcie_width(bp
),
10080 (bnx2x_get_pcie_speed(bp
) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10081 dev
->base_addr
, bp
->pdev
->irq
);
10082 printk(KERN_CONT
"node addr %s\n", print_mac(mac
, dev
->dev_addr
));
10087 iounmap(bp
->regview
);
10090 iounmap(bp
->doorbells
);
10094 if (atomic_read(&pdev
->enable_cnt
) == 1)
10095 pci_release_regions(pdev
);
10097 pci_disable_device(pdev
);
10098 pci_set_drvdata(pdev
, NULL
);
10103 static void __devexit
bnx2x_remove_one(struct pci_dev
*pdev
)
10105 struct net_device
*dev
= pci_get_drvdata(pdev
);
10109 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
10112 bp
= netdev_priv(dev
);
10114 unregister_netdev(dev
);
10117 iounmap(bp
->regview
);
10120 iounmap(bp
->doorbells
);
10124 if (atomic_read(&pdev
->enable_cnt
) == 1)
10125 pci_release_regions(pdev
);
10127 pci_disable_device(pdev
);
10128 pci_set_drvdata(pdev
, NULL
);
10131 static int bnx2x_suspend(struct pci_dev
*pdev
, pm_message_t state
)
10133 struct net_device
*dev
= pci_get_drvdata(pdev
);
10137 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
10140 bp
= netdev_priv(dev
);
10144 pci_save_state(pdev
);
10146 if (!netif_running(dev
)) {
10151 netif_device_detach(dev
);
10153 bnx2x_nic_unload(bp
, UNLOAD_NORMAL
);
10155 bnx2x_set_power_state(bp
, pci_choose_state(pdev
, state
));
10162 static int bnx2x_resume(struct pci_dev
*pdev
)
10164 struct net_device
*dev
= pci_get_drvdata(pdev
);
10169 printk(KERN_ERR PFX
"BAD net device from bnx2x_init_one\n");
10172 bp
= netdev_priv(dev
);
10176 pci_restore_state(pdev
);
10178 if (!netif_running(dev
)) {
10183 bnx2x_set_power_state(bp
, PCI_D0
);
10184 netif_device_attach(dev
);
10186 rc
= bnx2x_nic_load(bp
, LOAD_NORMAL
);
10194 * bnx2x_io_error_detected - called when PCI error is detected
10195 * @pdev: Pointer to PCI device
10196 * @state: The current pci connection state
10198 * This function is called after a PCI bus error affecting
10199 * this device has been detected.
10201 static pci_ers_result_t
bnx2x_io_error_detected(struct pci_dev
*pdev
,
10202 pci_channel_state_t state
)
10204 struct net_device
*dev
= pci_get_drvdata(pdev
);
10205 struct bnx2x
*bp
= netdev_priv(dev
);
10209 netif_device_detach(dev
);
10211 if (netif_running(dev
))
10212 bnx2x_nic_unload(bp
, UNLOAD_CLOSE
);
10214 pci_disable_device(pdev
);
10218 /* Request a slot reset */
10219 return PCI_ERS_RESULT_NEED_RESET
;
10223 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10224 * @pdev: Pointer to PCI device
10226 * Restart the card from scratch, as if from a cold-boot.
10228 static pci_ers_result_t
bnx2x_io_slot_reset(struct pci_dev
*pdev
)
10230 struct net_device
*dev
= pci_get_drvdata(pdev
);
10231 struct bnx2x
*bp
= netdev_priv(dev
);
10235 if (pci_enable_device(pdev
)) {
10236 dev_err(&pdev
->dev
,
10237 "Cannot re-enable PCI device after reset\n");
10239 return PCI_ERS_RESULT_DISCONNECT
;
10242 pci_set_master(pdev
);
10243 pci_restore_state(pdev
);
10245 if (netif_running(dev
))
10246 bnx2x_set_power_state(bp
, PCI_D0
);
10250 return PCI_ERS_RESULT_RECOVERED
;
10254 * bnx2x_io_resume - called when traffic can start flowing again
10255 * @pdev: Pointer to PCI device
10257 * This callback is called when the error recovery driver tells us that
10258 * its OK to resume normal operation.
10260 static void bnx2x_io_resume(struct pci_dev
*pdev
)
10262 struct net_device
*dev
= pci_get_drvdata(pdev
);
10263 struct bnx2x
*bp
= netdev_priv(dev
);
10267 if (netif_running(dev
))
10268 bnx2x_nic_load(bp
, LOAD_OPEN
);
10270 netif_device_attach(dev
);
10275 static struct pci_error_handlers bnx2x_err_handler
= {
10276 .error_detected
= bnx2x_io_error_detected
,
10277 .slot_reset
= bnx2x_io_slot_reset
,
10278 .resume
= bnx2x_io_resume
,
10281 static struct pci_driver bnx2x_pci_driver
= {
10282 .name
= DRV_MODULE_NAME
,
10283 .id_table
= bnx2x_pci_tbl
,
10284 .probe
= bnx2x_init_one
,
10285 .remove
= __devexit_p(bnx2x_remove_one
),
10286 .suspend
= bnx2x_suspend
,
10287 .resume
= bnx2x_resume
,
10288 .err_handler
= &bnx2x_err_handler
,
10291 static int __init
bnx2x_init(void)
10293 return pci_register_driver(&bnx2x_pci_driver
);
10296 static void __exit
bnx2x_cleanup(void)
10298 pci_unregister_driver(&bnx2x_pci_driver
);
10301 module_init(bnx2x_init
);
10302 module_exit(bnx2x_cleanup
);