bnx2x: Supporting Device Control Channel
[deliverable/linux.git] / drivers/net/bnx2x_main.c

/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

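/* Copy a host buffer to a GRC address via the DMAE engine, polling the
 * write-back completion word; falls back to indirect register writes
 * while DMAE is not yet ready.
 */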
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

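/* Read len32 dwords from a GRC address into the slowpath write-back buffer
 * via the DMAE engine; falls back to indirect register reads while DMAE is
 * not yet ready.
 */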
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

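/* Scan the assert lists of the XSTORM, TSTORM, CSTORM and USTORM
 * processors and print any valid entries; returns the number of
 * asserts found.
 */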
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

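/* Print the MCP scratchpad cyclic buffer to the kernel log, starting
 * from the last mark written by the firmware.
 */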
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

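/* Dump driver state on a fatal error: status block indices, Rx/Tx ring
 * producers/consumers and the rings themselves, followed by the firmware
 * dump and the storm assert lists.
 */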
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

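/* Configure the HC to generate interrupts according to the active mode
 * (MSI-X, MSI or INTx) and set up the leading/trailing edge attention
 * registers on E1H chips.
 */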
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

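/* Mask interrupts at the driver (and optionally the HW) level and wait
 * until all ISRs and the slowpath task have finished running.
 */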
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

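/* Acknowledge a status block index to the IGU and optionally change the
 * interrupt state (enable/disable) for the given storm.
 */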
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

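/* Return the number of available Tx BDs, counting the "next-page"
 * entries as used so they are never handed out.
 */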
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

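/* Reclaim completed Tx packets up to the consumer index reported in the
 * status block, then wake the Tx queue if it was stopped and enough BDs
 * have been freed.
 */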
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}


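/* Handle a slowpath completion (ramrod) reported on the Rx CQ and
 * advance the fastpath/driver state machine accordingly.
 */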
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

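/* Start a TPA aggregation on the given queue (bin): park the partially
 * received skb in the pool and map a fresh skb from the pool into the
 * producer slot.
 */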
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

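/* Attach the SGE pages of an aggregated packet to the skb as fragments,
 * replacing each consumed page with a newly allocated one.
 */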
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

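/* Complete a TPA aggregation: fix the IP checksum, attach the SGE pages
 * and pass the skb to the stack, or drop it if a replacement skb cannot
 * be allocated.
 */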
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

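/* Main Rx polling routine: walk the Rx completion queue up to the budget,
 * handling slowpath events, TPA start/stop and regular packets, then
 * update the ring producers.
 */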
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

1679
1680static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1681{
1682 struct bnx2x_fastpath *fp = fp_cookie;
1683 struct bnx2x *bp = fp->bp;
a2fbb9ea 1684
da5a662a
VZ
1685 /* Return here if interrupt is disabled */
1686 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1688 return IRQ_HANDLED;
1689 }
1690
34f80b04 1691 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1692 fp->index, fp->sb_id);
0626b899 1693 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1694
1695#ifdef BNX2X_STOP_ON_ERROR
1696 if (unlikely(bp->panic))
1697 return IRQ_HANDLED;
1698#endif
ca00392c
EG
1699 /* Handle Rx or Tx according to MSI-X vector */
1700 if (fp->is_rx_queue) {
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1703
ca00392c 1704 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1705
ca00392c
EG
1706 } else {
1707 prefetch(fp->tx_cons_sb);
1708 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710 bnx2x_update_fpsb_idx(fp);
1711 rmb();
1712 bnx2x_tx_int(fp);
1713
1714 /* Re-enable interrupts */
1715 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719 }
34f80b04 1720
a2fbb9ea
ET
1721 return IRQ_HANDLED;
1722}
1723
1724static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725{
555f6c78 1726 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1727 u16 status = bnx2x_ack_int(bp);
34f80b04 1728 u16 mask;
ca00392c 1729 int i;
a2fbb9ea 1730
34f80b04 1731 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1732 if (unlikely(status == 0)) {
1733 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734 return IRQ_NONE;
1735 }
f5372251 1736 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1737
34f80b04 1738 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1739 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741 return IRQ_HANDLED;
1742 }
1743
3196a88a
EG
1744#ifdef BNX2X_STOP_ON_ERROR
1745 if (unlikely(bp->panic))
1746 return IRQ_HANDLED;
1747#endif
1748
ca00392c
EG
1749 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1751
ca00392c
EG
1752 mask = 0x2 << fp->sb_id;
1753 if (status & mask) {
1754 /* Handle Rx or Tx according to SB id */
1755 if (fp->is_rx_queue) {
1756 prefetch(fp->rx_cons_sb);
1757 prefetch(&fp->status_blk->u_status_block.
1758 status_block_index);
a2fbb9ea 1759
ca00392c 1760 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1761
ca00392c
EG
1762 } else {
1763 prefetch(fp->tx_cons_sb);
1764 prefetch(&fp->status_blk->c_status_block.
1765 status_block_index);
1766
1767 bnx2x_update_fpsb_idx(fp);
1768 rmb();
1769 bnx2x_tx_int(fp);
1770
1771 /* Re-enable interrupts */
1772 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773 le16_to_cpu(fp->fp_u_idx),
1774 IGU_INT_NOP, 1);
1775 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776 le16_to_cpu(fp->fp_c_idx),
1777 IGU_INT_ENABLE, 1);
1778 }
1779 status &= ~mask;
1780 }
a2fbb9ea
ET
1781 }
1782
a2fbb9ea 1783
34f80b04 1784 if (unlikely(status & 0x1)) {
1cf167f2 1785 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1786
1787 status &= ~0x1;
1788 if (!status)
1789 return IRQ_HANDLED;
1790 }
1791
34f80b04
EG
1792 if (status)
1793 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794 status);
a2fbb9ea 1795
c18487ee 1796 return IRQ_HANDLED;
a2fbb9ea
ET
1797}
1798
c18487ee 1799/* end of fast path */
a2fbb9ea 1800
bb2a0f7a 1801static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1802
c18487ee
YR
1803/* Link */
1804
1805/*
1806 * General service functions
1807 */
a2fbb9ea 1808
4a37fb66 1809static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1810{
1811 u32 lock_status;
1812 u32 resource_bit = (1 << resource);
4a37fb66
YG
1813 int func = BP_FUNC(bp);
1814 u32 hw_lock_control_reg;
c18487ee 1815 int cnt;
a2fbb9ea 1816
c18487ee
YR
1817 /* Validating that the resource is within range */
1818 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819 DP(NETIF_MSG_HW,
1820 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822 return -EINVAL;
1823 }
a2fbb9ea 1824
4a37fb66
YG
1825 if (func <= 5) {
1826 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827 } else {
1828 hw_lock_control_reg =
1829 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830 }
1831
c18487ee 1832 /* Validating that the resource is not already taken */
4a37fb66 1833 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1834 if (lock_status & resource_bit) {
1835 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1836 lock_status, resource_bit);
1837 return -EEXIST;
1838 }
a2fbb9ea 1839
46230476
EG
1840 /* Try for 5 seconds, polling every 5ms */
1841 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1842 /* Try to acquire the lock */
4a37fb66
YG
1843 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1845 if (lock_status & resource_bit)
1846 return 0;
a2fbb9ea 1847
c18487ee 1848 msleep(5);
a2fbb9ea 1849 }
c18487ee
YR
1850 DP(NETIF_MSG_HW, "Timeout\n");
1851 return -EAGAIN;
1852}
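/*
 * Illustrative pairing of the HW lock helpers above (a sketch, not
 * driver code; the error handling around the call is an assumption):
 */
#if 0
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	if (rc)		/* -EINVAL, -EEXIST, or -EAGAIN after the 5s poll */
		return rc;
	/* ... touch the shared resource ... */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
#endif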
a2fbb9ea 1853
4a37fb66 1854static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1855{
1856 u32 lock_status;
1857 u32 resource_bit = (1 << resource);
4a37fb66
YG
1858 int func = BP_FUNC(bp);
1859 u32 hw_lock_control_reg;
a2fbb9ea 1860
c18487ee
YR
1861 /* Validating that the resource is within range */
1862 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863 DP(NETIF_MSG_HW,
1864 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866 return -EINVAL;
1867 }
1868
4a37fb66
YG
1869 if (func <= 5) {
1870 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871 } else {
1872 hw_lock_control_reg =
1873 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874 }
1875
c18487ee 1876 /* Validating that the resource is currently taken */
4a37fb66 1877 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1878 if (!(lock_status & resource_bit)) {
1879 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1880 lock_status, resource_bit);
1881 return -EFAULT;
a2fbb9ea
ET
1882 }
1883
4a37fb66 1884 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1885 return 0;
1886}
1887
1888/* HW Lock for shared dual port PHYs */
4a37fb66 1889static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1890{
34f80b04 1891 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1892
46c6a674
EG
1893 if (bp->port.need_hw_lock)
1894 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1895}
a2fbb9ea 1896
4a37fb66 1897static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1898{
46c6a674
EG
1899 if (bp->port.need_hw_lock)
1900 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1901
34f80b04 1902 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1903}
a2fbb9ea 1904
4acac6a5
EG
1905int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906{
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910 int gpio_shift = gpio_num +
1911 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912 u32 gpio_mask = (1 << gpio_shift);
1913 u32 gpio_reg;
1914 int value;
1915
1916 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918 return -EINVAL;
1919 }
1920
1921 /* read GPIO value */
1922 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924 /* get the requested pin value */
1925 if ((gpio_reg & gpio_mask) == gpio_mask)
1926 value = 1;
1927 else
1928 value = 0;
1929
1930 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1931
1932 return value;
1933}
1934
17de50b7 1935int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
a2fbb9ea 1944
c18487ee
YR
1945 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947 return -EINVAL;
1948 }
a2fbb9ea 1949
4a37fb66 1950 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1951 /* read GPIO and mask except the float bits */
1952 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1953
c18487ee
YR
1954 switch (mode) {
1955 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957 gpio_num, gpio_shift);
1958 /* clear FLOAT and set CLR */
1959 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961 break;
a2fbb9ea 1962
c18487ee
YR
1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965 gpio_num, gpio_shift);
1966 /* clear FLOAT and set SET */
1967 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969 break;
a2fbb9ea 1970
17de50b7 1971 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1972 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973 gpio_num, gpio_shift);
1974 /* set FLOAT */
1975 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976 break;
a2fbb9ea 1977
c18487ee
YR
1978 default:
1979 break;
a2fbb9ea
ET
1980 }
1981
c18487ee 1982 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1983 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1984
c18487ee 1985 return 0;
a2fbb9ea
ET
1986}
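/*
 * Worked example of the swap logic above (values illustrative): with
 * both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE non-zero the XOR
 * flips the port, so GPIO 1 requested for port 1 resolves to
 * gpio_port 0 and gpio_shift = 1 + 0 = 1; if either register is zero,
 * (0 ^ port) leaves the caller's port unchanged.
 */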
1987
4acac6a5
EG
1988int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989{
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1996 u32 gpio_reg;
1997
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000 return -EINVAL;
2001 }
2002
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO int */
2005 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007 switch (mode) {
2008 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010 "output low\n", gpio_num, gpio_shift);
2011 /* clear SET and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014 break;
2015
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018 "output high\n", gpio_num, gpio_shift);
2019 /* clear CLR and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022 break;
2023
2024 default:
2025 break;
2026 }
2027
2028 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031 return 0;
2032}
2033
c18487ee 2034static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2035{
c18487ee
YR
2036 u32 spio_mask = (1 << spio_num);
2037 u32 spio_reg;
a2fbb9ea 2038
c18487ee
YR
2039 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040 (spio_num > MISC_REGISTERS_SPIO_7)) {
2041 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042 return -EINVAL;
a2fbb9ea
ET
2043 }
2044
4a37fb66 2045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2046 /* read SPIO and mask except the float bits */
2047 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2048
c18487ee 2049 switch (mode) {
6378c025 2050 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2051 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052 /* clear FLOAT and set CLR */
2053 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055 break;
a2fbb9ea 2056
6378c025 2057 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2058 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059 /* clear FLOAT and set SET */
2060 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062 break;
a2fbb9ea 2063
c18487ee
YR
2064 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066 /* set FLOAT */
2067 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068 break;
a2fbb9ea 2069
c18487ee
YR
2070 default:
2071 break;
a2fbb9ea
ET
2072 }
2073
c18487ee 2074 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2076
a2fbb9ea
ET
2077 return 0;
2078}
2079
c18487ee 2080static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2081{
ad33ea3a
EG
2082 switch (bp->link_vars.ieee_fc &
2083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2084 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2085 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2086 ADVERTISED_Pause);
2087 break;
356e2385 2088
c18487ee 2089 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2090 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2091 ADVERTISED_Pause);
2092 break;
356e2385 2093
c18487ee 2094 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2095 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2096 break;
356e2385 2097
c18487ee 2098 default:
34f80b04 2099 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2100 ADVERTISED_Pause);
2101 break;
2102 }
2103}
f1410647 2104
c18487ee
YR
2105static void bnx2x_link_report(struct bnx2x *bp)
2106{
2691d51d
EG
2107 if (bp->state == BNX2X_STATE_DISABLED) {
2108 netif_carrier_off(bp->dev);
2109 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2110 return;
2111 }
2112
c18487ee
YR
2113 if (bp->link_vars.link_up) {
2114 if (bp->state == BNX2X_STATE_OPEN)
2115 netif_carrier_on(bp->dev);
2116 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2117
c18487ee 2118 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2119
c18487ee
YR
2120 if (bp->link_vars.duplex == DUPLEX_FULL)
2121 printk("full duplex");
2122 else
2123 printk("half duplex");
f1410647 2124
c0700f90
DM
2125 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2126 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2127 printk(", receive ");
356e2385
EG
2128 if (bp->link_vars.flow_ctrl &
2129 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2130 printk("& transmit ");
2131 } else {
2132 printk(", transmit ");
2133 }
2134 printk("flow control ON");
2135 }
2136 printk("\n");
f1410647 2137
c18487ee
YR
2138 } else { /* link_down */
2139 netif_carrier_off(bp->dev);
2140 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2141 }
c18487ee
YR
2142}
2143
b5bf9068 2144static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2145{
19680c48
EG
2146 if (!BP_NOMCP(bp)) {
2147 u8 rc;
a2fbb9ea 2148
19680c48 2149 /* Initialize link parameters structure variables */
8c99e7b0
YR
2150 /* It is recommended to turn off RX FC for jumbo frames
2151 for better performance */
2152 if (IS_E1HMF(bp))
c0700f90 2153 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2154 else if (bp->dev->mtu > 5000)
c0700f90 2155 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2156 else
c0700f90 2157 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2158
4a37fb66 2159 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2160
2161 if (load_mode == LOAD_DIAG)
2162 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2163
19680c48 2164 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2165
4a37fb66 2166 bnx2x_release_phy_lock(bp);
a2fbb9ea 2167
3c96c68b
EG
2168 bnx2x_calc_fc_adv(bp);
2169
b5bf9068
EG
2170 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2171 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2172 bnx2x_link_report(bp);
b5bf9068 2173 }
34f80b04 2174
19680c48
EG
2175 return rc;
2176 }
f5372251 2177 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2178 return -EINVAL;
a2fbb9ea
ET
2179}
2180
c18487ee 2181static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2182{
19680c48 2183 if (!BP_NOMCP(bp)) {
4a37fb66 2184 bnx2x_acquire_phy_lock(bp);
19680c48 2185 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2186 bnx2x_release_phy_lock(bp);
a2fbb9ea 2187
19680c48
EG
2188 bnx2x_calc_fc_adv(bp);
2189 } else
f5372251 2190 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2191}
a2fbb9ea 2192
c18487ee
YR
2193static void bnx2x__link_reset(struct bnx2x *bp)
2194{
19680c48 2195 if (!BP_NOMCP(bp)) {
4a37fb66 2196 bnx2x_acquire_phy_lock(bp);
589abe3a 2197 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2198 bnx2x_release_phy_lock(bp);
19680c48 2199 } else
f5372251 2200 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2201}
a2fbb9ea 2202
c18487ee
YR
2203static u8 bnx2x_link_test(struct bnx2x *bp)
2204{
2205 u8 rc;
a2fbb9ea 2206
4a37fb66 2207 bnx2x_acquire_phy_lock(bp);
c18487ee 2208 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2209 bnx2x_release_phy_lock(bp);
a2fbb9ea 2210
c18487ee
YR
2211 return rc;
2212}
a2fbb9ea 2213
8a1c38d1 2214static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2215{
8a1c38d1
EG
2216 u32 r_param = bp->link_vars.line_speed / 8;
2217 u32 fair_periodic_timeout_usec;
2218 u32 t_fair;
34f80b04 2219
8a1c38d1
EG
2220 memset(&(bp->cmng.rs_vars), 0,
2221 sizeof(struct rate_shaping_vars_per_port));
2222 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2223
8a1c38d1
EG
2224 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2225 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2226
8a1c38d1
EG
2227 /* this is the threshold below which no timer arming will occur
2228 1.25 coefficient is for the threshold to be a little bigger
2229 than the real time, to compensate for timer inaccuracy */
2230 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2231 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2232
8a1c38d1
EG
2233 /* resolution of fairness timer */
2234 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2235 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2236 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2237
8a1c38d1
EG
2238 /* this is the threshold below which we won't arm the timer anymore */
2239 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2240
8a1c38d1
EG
2241 /* we multiply by 1e3/8 to get bytes/msec.
2242 We don't want the credits to exceed a credit
2243 of t_fair*FAIR_MEM (the algorithm resolution) */
2244 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2245 /* since each tick is 4 usec */
2246 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2247}
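/*
 * Worked numbers for the code above, assuming a 10G link
 * (line_speed = 10000 Mbps, so r_param = 1250 bytes/usec):
 *   rs_periodic_timeout = 100 / 4 = 25 SDM ticks (i.e. 100 usec);
 *   rs_threshold = (100 * 1250 * 5) / 4 = 156250 bytes, which is
 *     1.25 times the 125000 bytes a 10G port sends in one period;
 *   t_fair = T_FAIR_COEF / 10000 = 1000 usec, matching the comment
 *     above (and 10000 usec on a 1G link).
 */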
2248
2691d51d
EG
2249/* Calculates the sum of vn_min_rates.
2250 It's needed for further normalizing of the min_rates.
2251 Returns:
2252 sum of vn_min_rates.
2253 or
2254 0 - if all the min_rates are 0.
2255 In the latter case the fairness algorithm should be deactivated.
2256 If not all min_rates are zero then those that are zero will be set to 1.
2257 */
2258static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2259{
2260 int all_zero = 1;
2261 int port = BP_PORT(bp);
2262 int vn;
2263
2264 bp->vn_weight_sum = 0;
2265 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2266 int func = 2*vn + port;
2267 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2268 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2269 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2270
2271 /* Skip hidden vns */
2272 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2273 continue;
2274
2275 /* If min rate is zero - set it to 1 */
2276 if (!vn_min_rate)
2277 vn_min_rate = DEF_MIN_RATE;
2278 else
2279 all_zero = 0;
2280
2281 bp->vn_weight_sum += vn_min_rate;
2282 }
2283
2284 /* ... only if all min rates are zeros - disable fairness */
2285 if (all_zero)
2286 bp->vn_weight_sum = 0;
2287}
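/*
 * Example (illustrative values): with per-vn min rates {0, 2500, 0,
 * 5000} after the *100 scaling above, the two zero entries are raised
 * to DEF_MIN_RATE before summing, so vn_weight_sum stays non-zero and
 * fairness remains active; only when every vn reports 0 is
 * vn_weight_sum cleared and fairness disabled.
 */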
2288
8a1c38d1 2289static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2290{
2291 struct rate_shaping_vars_per_vn m_rs_vn;
2292 struct fairness_vars_per_vn m_fair_vn;
2293 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2294 u16 vn_min_rate, vn_max_rate;
2295 int i;
2296
2297 /* If function is hidden - set min and max to zeroes */
2298 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2299 vn_min_rate = 0;
2300 vn_max_rate = 0;
2301
2302 } else {
2303 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2304 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2305 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2306 if current min rate is zero - set it to 1.
33471629 2307 This is a requirement of the algorithm. */
8a1c38d1 2308 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2309 vn_min_rate = DEF_MIN_RATE;
2310 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2311 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2312 }
2313
8a1c38d1
EG
2314 DP(NETIF_MSG_IFUP,
2315 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2316 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2317
2318 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2319 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2320
2321 /* global vn counter - maximal Mbps for this vn */
2322 m_rs_vn.vn_counter.rate = vn_max_rate;
2323
2324 /* quota - number of bytes transmitted in this period */
2325 m_rs_vn.vn_counter.quota =
2326 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2327
8a1c38d1 2328 if (bp->vn_weight_sum) {
34f80b04
EG
2329 /* credit for each period of the fairness algorithm:
2330 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2331 vn_weight_sum should not be larger than 10000, thus
2332 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2333 than zero */
34f80b04 2334 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2335 max((u32)(vn_min_rate * (T_FAIR_COEF /
2336 (8 * bp->vn_weight_sum))),
2337 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2338 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2339 m_fair_vn.vn_credit_delta);
2340 }
2341
34f80b04
EG
2342 /* Store it to internal memory */
2343 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2344 REG_WR(bp, BAR_XSTRORM_INTMEM +
2345 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2346 ((u32 *)(&m_rs_vn))[i]);
2347
2348 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2349 REG_WR(bp, BAR_XSTRORM_INTMEM +
2350 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2351 ((u32 *)(&m_fair_vn))[i]);
2352}
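/*
 * Illustrative credit arithmetic (T_FAIR_COEF taken as 10^7, per the
 * "1000usec at 10G" comment in bnx2x_init_port_minmax()): with
 * vn_min_rate = 2500 and vn_weight_sum = 10000,
 *   vn_credit_delta = 2500 * (10^7 / (8 * 10000)) = 312500 bytes,
 * unless 2 * fair_threshold is larger, in which case the max() clamps
 * the credit up to that floor.
 */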
2353
8a1c38d1 2354
c18487ee
YR
2355/* This function is called upon link interrupt */
2356static void bnx2x_link_attn(struct bnx2x *bp)
2357{
bb2a0f7a
YG
2358 /* Make sure that we are synced with the current statistics */
2359 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2360
c18487ee 2361 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2362
bb2a0f7a
YG
2363 if (bp->link_vars.link_up) {
2364
1c06328c
EG
2365 /* dropless flow control */
2366 if (CHIP_IS_E1H(bp)) {
2367 int port = BP_PORT(bp);
2368 u32 pause_enabled = 0;
2369
2370 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2371 pause_enabled = 1;
2372
2373 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2374 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2375 pause_enabled);
2376 }
2377
bb2a0f7a
YG
2378 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2379 struct host_port_stats *pstats;
2380
2381 pstats = bnx2x_sp(bp, port_stats);
2382 /* reset old bmac stats */
2383 memset(&(pstats->mac_stx[0]), 0,
2384 sizeof(struct mac_stx));
2385 }
2386 if ((bp->state == BNX2X_STATE_OPEN) ||
2387 (bp->state == BNX2X_STATE_DISABLED))
2388 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2389 }
2390
c18487ee
YR
2391 /* indicate link status */
2392 bnx2x_link_report(bp);
34f80b04
EG
2393
2394 if (IS_E1HMF(bp)) {
8a1c38d1 2395 int port = BP_PORT(bp);
34f80b04 2396 int func;
8a1c38d1 2397 int vn;
34f80b04
EG
2398
2399 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2400 if (vn == BP_E1HVN(bp))
2401 continue;
2402
8a1c38d1 2403 func = ((vn << 1) | port);
34f80b04
EG
2404
2405 /* Set the attention towards other drivers
2406 on the same port */
2407 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2408 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2409 }
34f80b04 2410
8a1c38d1
EG
2411 if (bp->link_vars.link_up) {
2412 int i;
2413
2414 /* Init rate shaping and fairness contexts */
2415 bnx2x_init_port_minmax(bp);
34f80b04 2416
34f80b04 2417 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2418 bnx2x_init_vn_minmax(bp, 2*vn + port);
2419
2420 /* Store it to internal memory */
2421 for (i = 0;
2422 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2423 REG_WR(bp, BAR_XSTRORM_INTMEM +
2424 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2425 ((u32 *)(&bp->cmng))[i]);
2426 }
34f80b04 2427 }
c18487ee 2428}
a2fbb9ea 2429
c18487ee
YR
2430static void bnx2x__link_status_update(struct bnx2x *bp)
2431{
2691d51d
EG
2432 int func = BP_FUNC(bp);
2433
c18487ee
YR
2434 if (bp->state != BNX2X_STATE_OPEN)
2435 return;
a2fbb9ea 2436
c18487ee 2437 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2438
bb2a0f7a
YG
2439 if (bp->link_vars.link_up)
2440 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2441 else
2442 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2443
2691d51d
EG
2444 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2445 bnx2x_calc_vn_weight_sum(bp);
2446
c18487ee
YR
2447 /* indicate link status */
2448 bnx2x_link_report(bp);
a2fbb9ea 2449}
a2fbb9ea 2450
34f80b04
EG
2451static void bnx2x_pmf_update(struct bnx2x *bp)
2452{
2453 int port = BP_PORT(bp);
2454 u32 val;
2455
2456 bp->port.pmf = 1;
2457 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2458
2459 /* enable nig attention */
2460 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2461 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2462 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2463
2464 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2465}
2466
c18487ee 2467/* end of Link */
a2fbb9ea
ET
2468
2469/* slow path */
2470
2471/*
2472 * General service functions
2473 */
2474
2691d51d
EG
2475/* send the MCP a request, block until there is a reply */
2476u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2477{
2478 int func = BP_FUNC(bp);
2479 u32 seq = ++bp->fw_seq;
2480 u32 rc = 0;
2481 u32 cnt = 1;
2482 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2483
2484 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2485 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2486
2487 do {
2488 /* let the FW do its magic ... */
2489 msleep(delay);
2490
2491 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2492
2493 /* Give the FW up to 2 seconds (200*10ms) */
2494 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2495
2496 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2497 cnt*delay, rc, seq);
2498
2499 /* is this a reply to our command? */
2500 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2501 rc &= FW_MSG_CODE_MASK;
2502 else {
2503 /* FW BUG! */
2504 BNX2X_ERR("FW failed to respond!\n");
2505 bnx2x_fw_dump(bp);
2506 rc = 0;
2507 }
2508
2509 return rc;
2510}
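/*
 * Minimal usage sketch (not driver code; the error message is
 * hypothetical): callers treat a masked reply of 0 as "the FW never
 * echoed our sequence number back".
 */
#if 0
	u32 reply = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
	if (reply == 0)
		BNX2X_ERR("MCP mailbox timed out\n");
#endif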
2511
2512static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2513static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2514static void bnx2x_set_rx_mode(struct net_device *dev);
2515
2516static void bnx2x_e1h_disable(struct bnx2x *bp)
2517{
2518 int port = BP_PORT(bp);
2519 int i;
2520
2521 bp->rx_mode = BNX2X_RX_MODE_NONE;
2522 bnx2x_set_storm_rx_mode(bp);
2523
2524 netif_tx_disable(bp->dev);
2525 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2526
2527 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2528
2529 bnx2x_set_mac_addr_e1h(bp, 0);
2530
2531 for (i = 0; i < MC_HASH_SIZE; i++)
2532 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2533
2534 netif_carrier_off(bp->dev);
2535}
2536
2537static void bnx2x_e1h_enable(struct bnx2x *bp)
2538{
2539 int port = BP_PORT(bp);
2540
2541 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2542
2543 bnx2x_set_mac_addr_e1h(bp, 1);
2544
2545 /* Tx queues should only be re-enabled */
2546 netif_tx_wake_all_queues(bp->dev);
2547
2548 /* Initialize the receive filter. */
2549 bnx2x_set_rx_mode(bp->dev);
2550}
2551
2552static void bnx2x_update_min_max(struct bnx2x *bp)
2553{
2554 int port = BP_PORT(bp);
2555 int vn, i;
2556
2557 /* Init rate shaping and fairness contexts */
2558 bnx2x_init_port_minmax(bp);
2559
2560 bnx2x_calc_vn_weight_sum(bp);
2561
2562 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2563 bnx2x_init_vn_minmax(bp, 2*vn + port);
2564
2565 if (bp->port.pmf) {
2566 int func;
2567
2568 /* Set the attention towards other drivers on the same port */
2569 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2570 if (vn == BP_E1HVN(bp))
2571 continue;
2572
2573 func = ((vn << 1) | port);
2574 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2575 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2576 }
2577
2578 /* Store it to internal memory */
2579 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2580 REG_WR(bp, BAR_XSTRORM_INTMEM +
2581 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2582 ((u32 *)(&bp->cmng))[i]);
2583 }
2584}
2585
2586static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2587{
2588 int func = BP_FUNC(bp);
2589
2590 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2591 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2592
2593 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2594
2595 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2596 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2597 bp->state = BNX2X_STATE_DISABLED;
2598
2599 bnx2x_e1h_disable(bp);
2600 } else {
2601 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2602 bp->state = BNX2X_STATE_OPEN;
2603
2604 bnx2x_e1h_enable(bp);
2605 }
2606 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2607 }
2608 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2609
2610 bnx2x_update_min_max(bp);
2611 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2612 }
2613
2614 /* Report results to MCP */
2615 if (dcc_event)
2616 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2617 else
2618 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2619}
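/*
 * Note on the reply above: each handled event clears its bit from
 * dcc_event, so any bit still set at the end is an unrecognized DCC
 * request and the MCP is told DCC_FAILURE; a fully consumed mask
 * reports DCC_OK.
 */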
2620
a2fbb9ea
ET
2621/* the slow path queue is odd since completions arrive on the fastpath ring */
2622static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2623 u32 data_hi, u32 data_lo, int common)
2624{
34f80b04 2625 int func = BP_FUNC(bp);
a2fbb9ea 2626
34f80b04
EG
2627 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2628 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2629 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2630 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2631 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2632
2633#ifdef BNX2X_STOP_ON_ERROR
2634 if (unlikely(bp->panic))
2635 return -EIO;
2636#endif
2637
34f80b04 2638 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2639
2640 if (!bp->spq_left) {
2641 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2642 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2643 bnx2x_panic();
2644 return -EBUSY;
2645 }
f1410647 2646
a2fbb9ea
ET
2647 /* CID needs port number to be encoded in it */
2648 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2649 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2650 HW_CID(bp, cid)));
2651 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2652 if (common)
2653 bp->spq_prod_bd->hdr.type |=
2654 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2655
2656 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2657 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2658
2659 bp->spq_left--;
2660
2661 if (bp->spq_prod_bd == bp->spq_last_bd) {
2662 bp->spq_prod_bd = bp->spq;
2663 bp->spq_prod_idx = 0;
2664 DP(NETIF_MSG_TIMER, "end of spq\n");
2665
2666 } else {
2667 bp->spq_prod_bd++;
2668 bp->spq_prod_idx++;
2669 }
2670
37dbbf32
EG
2671 /* Make sure that BD data is updated before writing the producer */
2672 wmb();
2673
34f80b04 2674 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2675 bp->spq_prod_idx);
2676
37dbbf32
EG
2677 mmiowb();
2678
34f80b04 2679 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2680 return 0;
2681}
2682
2683/* acquire split MCP access lock register */
4a37fb66 2684static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2685{
a2fbb9ea 2686 u32 i, j, val;
34f80b04 2687 int rc = 0;
a2fbb9ea
ET
2688
2689 might_sleep();
2690 i = 100;
2691 for (j = 0; j < i*10; j++) {
2692 val = (1UL << 31);
2693 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2694 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2695 if (val & (1L << 31))
2696 break;
2697
2698 msleep(5);
2699 }
a2fbb9ea 2700 if (!(val & (1L << 31))) {
19680c48 2701 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2702 rc = -EBUSY;
2703 }
2704
2705 return rc;
2706}
2707
4a37fb66
YG
2708/* release split MCP access lock register */
2709static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2710{
2711 u32 val = 0;
2712
2713 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2714}
2715
2716static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2717{
2718 struct host_def_status_block *def_sb = bp->def_status_blk;
2719 u16 rc = 0;
2720
2721 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2722 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2723 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2724 rc |= 1;
2725 }
2726 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2727 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2728 rc |= 2;
2729 }
2730 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2731 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2732 rc |= 4;
2733 }
2734 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2735 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2736 rc |= 8;
2737 }
2738 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2739 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2740 rc |= 16;
2741 }
2742 return rc;
2743}
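/*
 * The return value is a bitmask of which def-SB indices moved, read
 * straight from the code above: bit 0 - attention bits, bit 1 - CSTORM,
 * bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.  bnx2x_sp_task()
 * currently acts only on bit 0.
 */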
2744
2745/*
2746 * slow path service functions
2747 */
2748
2749static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2750{
34f80b04 2751 int port = BP_PORT(bp);
5c862848
EG
2752 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2753 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2754 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2755 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2756 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2757 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2758 u32 aeu_mask;
87942b46 2759 u32 nig_mask = 0;
a2fbb9ea 2760
a2fbb9ea
ET
2761 if (bp->attn_state & asserted)
2762 BNX2X_ERR("IGU ERROR\n");
2763
3fcaf2e5
EG
2764 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2765 aeu_mask = REG_RD(bp, aeu_addr);
2766
a2fbb9ea 2767 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2768 aeu_mask, asserted);
2769 aeu_mask &= ~(asserted & 0xff);
2770 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2771
3fcaf2e5
EG
2772 REG_WR(bp, aeu_addr, aeu_mask);
2773 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2774
3fcaf2e5 2775 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2776 bp->attn_state |= asserted;
3fcaf2e5 2777 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2778
2779 if (asserted & ATTN_HARD_WIRED_MASK) {
2780 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2781
a5e9a7cf
EG
2782 bnx2x_acquire_phy_lock(bp);
2783
877e9aa4 2784 /* save nig interrupt mask */
87942b46 2785 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2786 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2787
c18487ee 2788 bnx2x_link_attn(bp);
a2fbb9ea
ET
2789
2790 /* handle unicore attn? */
2791 }
2792 if (asserted & ATTN_SW_TIMER_4_FUNC)
2793 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2794
2795 if (asserted & GPIO_2_FUNC)
2796 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2797
2798 if (asserted & GPIO_3_FUNC)
2799 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2800
2801 if (asserted & GPIO_4_FUNC)
2802 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2803
2804 if (port == 0) {
2805 if (asserted & ATTN_GENERAL_ATTN_1) {
2806 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2807 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2808 }
2809 if (asserted & ATTN_GENERAL_ATTN_2) {
2810 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2811 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2812 }
2813 if (asserted & ATTN_GENERAL_ATTN_3) {
2814 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2815 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2816 }
2817 } else {
2818 if (asserted & ATTN_GENERAL_ATTN_4) {
2819 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2821 }
2822 if (asserted & ATTN_GENERAL_ATTN_5) {
2823 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2824 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2825 }
2826 if (asserted & ATTN_GENERAL_ATTN_6) {
2827 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2828 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2829 }
2830 }
2831
2832 } /* if hardwired */
2833
5c862848
EG
2834 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2835 asserted, hc_addr);
2836 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2837
2838 /* now set back the mask */
a5e9a7cf 2839 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2840 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2841 bnx2x_release_phy_lock(bp);
2842 }
a2fbb9ea
ET
2843}
2844
fd4ef40d
EG
2845static inline void bnx2x_fan_failure(struct bnx2x *bp)
2846{
2847 int port = BP_PORT(bp);
2848
2849 /* mark the failure */
2850 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2851 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2852 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2853 bp->link_params.ext_phy_config);
2854
2855 /* log the failure */
2856 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2857 " the driver to shutdown the card to prevent permanent"
2858 " damage. Please contact Dell Support for assistance\n",
2859 bp->dev->name);
2860}
877e9aa4 2861static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2862{
34f80b04 2863 int port = BP_PORT(bp);
877e9aa4 2864 int reg_offset;
4d295db0 2865 u32 val, swap_val, swap_override;
877e9aa4 2866
34f80b04
EG
2867 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2868 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2869
34f80b04 2870 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2871
2872 val = REG_RD(bp, reg_offset);
2873 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2874 REG_WR(bp, reg_offset, val);
2875
2876 BNX2X_ERR("SPIO5 hw attention\n");
2877
fd4ef40d 2878 /* Fan failure attention */
35b19ba5
EG
2879 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2880 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2881 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2882 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2883 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2884 /* The PHY reset is controlled by GPIO 1 */
2885 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2886 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2887 break;
2888
4d295db0
EG
2889 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2890 /* The PHY reset is controlled by GPIO 1 */
2891 /* fake the port number to cancel the swap done in
2892 set_gpio() */
2893 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2894 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2895 port = (swap_val && swap_override) ^ 1;
2896 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2897 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2898 break;
2899
877e9aa4
ET
2900 default:
2901 break;
2902 }
fd4ef40d 2903 bnx2x_fan_failure(bp);
877e9aa4 2904 }
34f80b04 2905
589abe3a
EG
2906 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2907 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2908 bnx2x_acquire_phy_lock(bp);
2909 bnx2x_handle_module_detect_int(&bp->link_params);
2910 bnx2x_release_phy_lock(bp);
2911 }
2912
34f80b04
EG
2913 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2914
2915 val = REG_RD(bp, reg_offset);
2916 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2917 REG_WR(bp, reg_offset, val);
2918
2919 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2920 (attn & HW_INTERRUT_ASSERT_SET_0));
2921 bnx2x_panic();
2922 }
877e9aa4
ET
2923}
2924
2925static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2926{
2927 u32 val;
2928
0626b899 2929 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2930
2931 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2932 BNX2X_ERR("DB hw attention 0x%x\n", val);
2933 /* DORQ discard attention */
2934 if (val & 0x2)
2935 BNX2X_ERR("FATAL error from DORQ\n");
2936 }
34f80b04
EG
2937
2938 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2939
2940 int port = BP_PORT(bp);
2941 int reg_offset;
2942
2943 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2944 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2945
2946 val = REG_RD(bp, reg_offset);
2947 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2948 REG_WR(bp, reg_offset, val);
2949
2950 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2951 (attn & HW_INTERRUT_ASSERT_SET_1));
2952 bnx2x_panic();
2953 }
877e9aa4
ET
2954}
2955
2956static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2957{
2958 u32 val;
2959
2960 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2961
2962 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2963 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2964 /* CFC error attention */
2965 if (val & 0x2)
2966 BNX2X_ERR("FATAL error from CFC\n");
2967 }
2968
2969 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2970
2971 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2972 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2973 /* RQ_USDMDP_FIFO_OVERFLOW */
2974 if (val & 0x18000)
2975 BNX2X_ERR("FATAL error from PXP\n");
2976 }
34f80b04
EG
2977
2978 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2979
2980 int port = BP_PORT(bp);
2981 int reg_offset;
2982
2983 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2984 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2985
2986 val = REG_RD(bp, reg_offset);
2987 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2988 REG_WR(bp, reg_offset, val);
2989
2990 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2991 (attn & HW_INTERRUT_ASSERT_SET_2));
2992 bnx2x_panic();
2993 }
877e9aa4
ET
2994}
2995
2996static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2997{
34f80b04
EG
2998 u32 val;
2999
877e9aa4
ET
3000 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3001
34f80b04
EG
3002 if (attn & BNX2X_PMF_LINK_ASSERT) {
3003 int func = BP_FUNC(bp);
3004
3005 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3006 val = SHMEM_RD(bp, func_mb[func].drv_status);
3007 if (val & DRV_STATUS_DCC_EVENT_MASK)
3008 bnx2x_dcc_event(bp,
3009 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3010 bnx2x__link_status_update(bp);
2691d51d 3011 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3012 bnx2x_pmf_update(bp);
3013
3014 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3015
3016 BNX2X_ERR("MC assert!\n");
3017 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3018 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3019 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3020 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3021 bnx2x_panic();
3022
3023 } else if (attn & BNX2X_MCP_ASSERT) {
3024
3025 BNX2X_ERR("MCP assert!\n");
3026 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3027 bnx2x_fw_dump(bp);
877e9aa4
ET
3028
3029 } else
3030 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3031 }
3032
3033 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3034 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3035 if (attn & BNX2X_GRC_TIMEOUT) {
3036 val = CHIP_IS_E1H(bp) ?
3037 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3038 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3039 }
3040 if (attn & BNX2X_GRC_RSV) {
3041 val = CHIP_IS_E1H(bp) ?
3042 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3043 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3044 }
877e9aa4 3045 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3046 }
3047}
3048
3049static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3050{
a2fbb9ea
ET
3051 struct attn_route attn;
3052 struct attn_route group_mask;
34f80b04 3053 int port = BP_PORT(bp);
877e9aa4 3054 int index;
a2fbb9ea
ET
3055 u32 reg_addr;
3056 u32 val;
3fcaf2e5 3057 u32 aeu_mask;
a2fbb9ea
ET
3058
3059 /* need to take HW lock because MCP or other port might also
3060 try to handle this event */
4a37fb66 3061 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3062
3063 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3064 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3065 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3066 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3067 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3068 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3069
3070 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3071 if (deasserted & (1 << index)) {
3072 group_mask = bp->attn_group[index];
3073
34f80b04
EG
3074 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3075 index, group_mask.sig[0], group_mask.sig[1],
3076 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3077
877e9aa4
ET
3078 bnx2x_attn_int_deasserted3(bp,
3079 attn.sig[3] & group_mask.sig[3]);
3080 bnx2x_attn_int_deasserted1(bp,
3081 attn.sig[1] & group_mask.sig[1]);
3082 bnx2x_attn_int_deasserted2(bp,
3083 attn.sig[2] & group_mask.sig[2]);
3084 bnx2x_attn_int_deasserted0(bp,
3085 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3086
a2fbb9ea
ET
3087 if ((attn.sig[0] & group_mask.sig[0] &
3088 HW_PRTY_ASSERT_SET_0) ||
3089 (attn.sig[1] & group_mask.sig[1] &
3090 HW_PRTY_ASSERT_SET_1) ||
3091 (attn.sig[2] & group_mask.sig[2] &
3092 HW_PRTY_ASSERT_SET_2))
6378c025 3093 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3094 }
3095 }
3096
4a37fb66 3097 bnx2x_release_alr(bp);
a2fbb9ea 3098
5c862848 3099 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3100
3101 val = ~deasserted;
3fcaf2e5
EG
3102 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3103 val, reg_addr);
5c862848 3104 REG_WR(bp, reg_addr, val);
a2fbb9ea 3105
a2fbb9ea 3106 if (~bp->attn_state & deasserted)
3fcaf2e5 3107 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3108
3109 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3110 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3111
3fcaf2e5
EG
3112 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3113 aeu_mask = REG_RD(bp, reg_addr);
3114
3115 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3116 aeu_mask, deasserted);
3117 aeu_mask |= (deasserted & 0xff);
3118 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3119
3fcaf2e5
EG
3120 REG_WR(bp, reg_addr, aeu_mask);
3121 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3122
3123 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3124 bp->attn_state &= ~deasserted;
3125 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3126}
3127
3128static void bnx2x_attn_int(struct bnx2x *bp)
3129{
3130 /* read local copy of bits */
68d59484
EG
3131 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3132 attn_bits);
3133 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3134 attn_bits_ack);
a2fbb9ea
ET
3135 u32 attn_state = bp->attn_state;
3136
3137 /* look for changed bits */
3138 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3139 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3140
3141 DP(NETIF_MSG_HW,
3142 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3143 attn_bits, attn_ack, asserted, deasserted);
3144
3145 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3146 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3147
3148 /* handle bits that were raised */
3149 if (asserted)
3150 bnx2x_attn_int_asserted(bp, asserted);
3151
3152 if (deasserted)
3153 bnx2x_attn_int_deasserted(bp, deasserted);
3154}
3155
3156static void bnx2x_sp_task(struct work_struct *work)
3157{
1cf167f2 3158 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3159 u16 status;
3160
34f80b04 3161
a2fbb9ea
ET
3162 /* Return here if interrupt is disabled */
3163 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3164 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3165 return;
3166 }
3167
3168 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3169/* if (status == 0) */
3170/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3171
3196a88a 3172 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3173
877e9aa4
ET
3174 /* HW attentions */
3175 if (status & 0x1)
a2fbb9ea 3176 bnx2x_attn_int(bp);
a2fbb9ea 3177
68d59484 3178 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3179 IGU_INT_NOP, 1);
3180 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3181 IGU_INT_NOP, 1);
3182 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3183 IGU_INT_NOP, 1);
3184 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3185 IGU_INT_NOP, 1);
3186 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3187 IGU_INT_ENABLE, 1);
877e9aa4 3188
a2fbb9ea
ET
3189}
3190
3191static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3192{
3193 struct net_device *dev = dev_instance;
3194 struct bnx2x *bp = netdev_priv(dev);
3195
3196 /* Return here if interrupt is disabled */
3197 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3198 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3199 return IRQ_HANDLED;
3200 }
3201
8d9c5f34 3202 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3203
3204#ifdef BNX2X_STOP_ON_ERROR
3205 if (unlikely(bp->panic))
3206 return IRQ_HANDLED;
3207#endif
3208
1cf167f2 3209 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3210
3211 return IRQ_HANDLED;
3212}
3213
3214/* end of slow path */
3215
3216/* Statistics */
3217
3218/****************************************************************************
3219* Macros
3220****************************************************************************/
3221
a2fbb9ea
ET
3222/* sum[hi:lo] += add[hi:lo] */
3223#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3224 do { \
3225 s_lo += a_lo; \
f5ba6772 3226 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3227 } while (0)
3228
3229/* difference = minuend - subtrahend */
3230#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3231 do { \
bb2a0f7a
YG
3232 if (m_lo < s_lo) { \
3233 /* underflow */ \
a2fbb9ea 3234 d_hi = m_hi - s_hi; \
bb2a0f7a 3235 if (d_hi > 0) { \
6378c025 3236 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3237 d_hi--; \
3238 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3239 } else { \
6378c025 3240 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3241 d_hi = 0; \
3242 d_lo = 0; \
3243 } \
bb2a0f7a
YG
3244 } else { \
3245 /* m_lo >= s_lo */ \
a2fbb9ea 3246 if (m_hi < s_hi) { \
bb2a0f7a
YG
3247 d_hi = 0; \
3248 d_lo = 0; \
3249 } else { \
6378c025 3250 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3251 d_hi = m_hi - s_hi; \
3252 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3253 } \
3254 } \
3255 } while (0)
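/*
 * Standalone sketch of the split-64-bit subtraction above (for
 * illustration only, not driver code): 0x1_00000000 - 0x0_FFFFFFFF
 * must borrow from the high word.
 */
#if 0
	u32 d_hi, d_lo;
	DIFF_64(d_hi, 1, 0, d_lo, 0x00000000, 0xFFFFFFFF);
	/* underflow branch: d_hi == 0, d_lo == 1 (the high word 'loaned' 1) */
#endif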
3256
bb2a0f7a 3257#define UPDATE_STAT64(s, t) \
a2fbb9ea 3258 do { \
bb2a0f7a
YG
3259 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3260 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3261 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3262 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3263 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3264 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3265 } while (0)
3266
bb2a0f7a 3267#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3268 do { \
bb2a0f7a
YG
3269 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3270 diff.lo, new->s##_lo, old->s##_lo); \
3271 ADD_64(estats->t##_hi, diff.hi, \
3272 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3273 } while (0)
3274
3275/* sum[hi:lo] += add */
3276#define ADD_EXTEND_64(s_hi, s_lo, a) \
3277 do { \
3278 s_lo += a; \
3279 s_hi += (s_lo < a) ? 1 : 0; \
3280 } while (0)
3281
bb2a0f7a 3282#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3283 do { \
bb2a0f7a
YG
3284 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3285 pstats->mac_stx[1].s##_lo, \
3286 new->s); \
a2fbb9ea
ET
3287 } while (0)
3288
bb2a0f7a 3289#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3290 do { \
4781bfad
EG
3291 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3292 old_tclient->s = tclient->s; \
de832a55
EG
3293 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3294 } while (0)
3295
3296#define UPDATE_EXTEND_USTAT(s, t) \
3297 do { \
3298 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3299 old_uclient->s = uclient->s; \
3300 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3301 } while (0)
3302
3303#define UPDATE_EXTEND_XSTAT(s, t) \
3304 do { \
4781bfad
EG
3305 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3306 old_xclient->s = xclient->s; \
de832a55
EG
3307 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3308 } while (0)
3309
3310/* minuend -= subtrahend */
3311#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3312 do { \
3313 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3314 } while (0)
3315
3316/* minuend[hi:lo] -= subtrahend */
3317#define SUB_EXTEND_64(m_hi, m_lo, s) \
3318 do { \
3319 SUB_64(m_hi, 0, m_lo, s); \
3320 } while (0)
3321
3322#define SUB_EXTEND_USTAT(s, t) \
3323 do { \
3324 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3325 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3326 } while (0)
3327
3328/*
3329 * General service functions
3330 */
3331
3332static inline long bnx2x_hilo(u32 *hiref)
3333{
3334 u32 lo = *(hiref + 1);
3335#if (BITS_PER_LONG == 64)
3336 u32 hi = *hiref;
3337
3338 return HILO_U64(hi, lo);
3339#else
3340 return lo;
3341#endif
3342}
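/*
 * Note (illustrative): on BITS_PER_LONG == 64 the helper above folds
 * both words into one long, e.g. {hi = 0x1, lo = 0x2} -> 0x100000002;
 * on 32-bit it returns only the low word, so very large counters can
 * appear truncated there.
 */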
3343
3344/*
3345 * Init service functions
3346 */
3347
bb2a0f7a
YG
3348static void bnx2x_storm_stats_post(struct bnx2x *bp)
3349{
3350 if (!bp->stats_pending) {
3351 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3352 int i, rc;
bb2a0f7a
YG
3353
3354 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3355 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3356 for_each_queue(bp, i)
3357 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3358
3359 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3360 ((u32 *)&ramrod_data)[1],
3361 ((u32 *)&ramrod_data)[0], 0);
3362 if (rc == 0) {
3363 /* stats ramrod has its own slot on the spq */
3364 bp->spq_left++;
3365 bp->stats_pending = 1;
3366 }
3367 }
3368}
3369
3370static void bnx2x_stats_init(struct bnx2x *bp)
3371{
3372 int port = BP_PORT(bp);
de832a55 3373 int i;
bb2a0f7a 3374
de832a55 3375 bp->stats_pending = 0;
bb2a0f7a
YG
3376 bp->executer_idx = 0;
3377 bp->stats_counter = 0;
3378
3379 /* port stats */
3380 if (!BP_NOMCP(bp))
3381 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3382 else
3383 bp->port.port_stx = 0;
3384 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3385
3386 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3387 bp->port.old_nig_stats.brb_discard =
3388 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3389 bp->port.old_nig_stats.brb_truncate =
3390 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3391 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3392 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3393 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3394 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3395
3396 /* function stats */
de832a55
EG
3397 for_each_queue(bp, i) {
3398 struct bnx2x_fastpath *fp = &bp->fp[i];
3399
3400 memset(&fp->old_tclient, 0,
3401 sizeof(struct tstorm_per_client_stats));
3402 memset(&fp->old_uclient, 0,
3403 sizeof(struct ustorm_per_client_stats));
3404 memset(&fp->old_xclient, 0,
3405 sizeof(struct xstorm_per_client_stats));
3406 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3407 }
3408
bb2a0f7a 3409 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3410 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3411
3412 bp->stats_state = STATS_STATE_DISABLED;
3413 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3414 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3415}
3416
3417static void bnx2x_hw_stats_post(struct bnx2x *bp)
3418{
3419 struct dmae_command *dmae = &bp->stats_dmae;
3420 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3421
3422 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3423 if (CHIP_REV_IS_SLOW(bp))
3424 return;
bb2a0f7a
YG
3425
3426 /* loader */
3427 if (bp->executer_idx) {
3428 int loader_idx = PMF_DMAE_C(bp);
3429
3430 memset(dmae, 0, sizeof(struct dmae_command));
3431
3432 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3433 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3434 DMAE_CMD_DST_RESET |
3435#ifdef __BIG_ENDIAN
3436 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3437#else
3438 DMAE_CMD_ENDIANITY_DW_SWAP |
3439#endif
3440 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3441 DMAE_CMD_PORT_0) |
3442 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3443 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3444 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3445 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3446 sizeof(struct dmae_command) *
3447 (loader_idx + 1)) >> 2;
3448 dmae->dst_addr_hi = 0;
3449 dmae->len = sizeof(struct dmae_command) >> 2;
3450 if (CHIP_IS_E1(bp))
3451 dmae->len--;
3452 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3453 dmae->comp_addr_hi = 0;
3454 dmae->comp_val = 1;
3455
3456 *stats_comp = 0;
3457 bnx2x_post_dmae(bp, dmae, loader_idx);
3458
3459 } else if (bp->func_stx) {
3460 *stats_comp = 0;
3461 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3462 }
3463}
3464
3465static int bnx2x_stats_comp(struct bnx2x *bp)
3466{
3467 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3468 int cnt = 10;
3469
3470 might_sleep();
3471 while (*stats_comp != DMAE_COMP_VAL) {
3472 if (!cnt) {
3473 BNX2X_ERR("timeout waiting for stats finished\n");
3474 break;
3475 }
3476 cnt--;
12469401 3477 msleep(1);
3478 }
3479 return 1;
3480}
3481
3482/*
3483 * Statistics service functions
3484 */
3485
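/*
 * Called when this function becomes the PMF: DMAE the current port
 * statistics from the shmem area into host memory.  The copy is done
 * in two commands because a single DMAE read is capped at
 * DMAE_LEN32_RD_MAX 32-bit words.
 */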
3486static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3487{
3488 struct dmae_command *dmae;
3489 u32 opcode;
3490 int loader_idx = PMF_DMAE_C(bp);
3491 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3492
3493 /* sanity */
3494 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3495 BNX2X_ERR("BUG!\n");
3496 return;
3497 }
3498
3499 bp->executer_idx = 0;
3500
3501 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3502 DMAE_CMD_C_ENABLE |
3503 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3504#ifdef __BIG_ENDIAN
3505 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3506#else
3507 DMAE_CMD_ENDIANITY_DW_SWAP |
3508#endif
3509 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3510 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3511
3512 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3513 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3514 dmae->src_addr_lo = bp->port.port_stx >> 2;
3515 dmae->src_addr_hi = 0;
3516 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3517 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3518 dmae->len = DMAE_LEN32_RD_MAX;
3519 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3520 dmae->comp_addr_hi = 0;
3521 dmae->comp_val = 1;
3522
3523 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3524 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3525 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3526 dmae->src_addr_hi = 0;
3527 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3528 DMAE_LEN32_RD_MAX * 4);
3529 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3530 DMAE_LEN32_RD_MAX * 4);
3531 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3532 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3533 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3534 dmae->comp_val = DMAE_COMP_VAL;
3535
3536 *stats_comp = 0;
3537 bnx2x_hw_stats_post(bp);
3538 bnx2x_stats_comp(bp);
3539}
3540
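/*
 * Assemble the full statistics DMAE program for a PMF: host->GRC
 * copies that publish the port/function stats to the MCP, then
 * GRC->host copies that pull the MAC counters (BMAC or EMAC,
 * whichever is active) and the NIG counters.  Only the last command
 * completes into stats_comp; the others chain through the loader.
 */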
3541static void bnx2x_port_stats_init(struct bnx2x *bp)
3542{
3543 struct dmae_command *dmae;
34f80b04 3544 int port = BP_PORT(bp);
bb2a0f7a 3545 int vn = BP_E1HVN(bp);
a2fbb9ea 3546 u32 opcode;
bb2a0f7a 3547 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3548 u32 mac_addr;
3549 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3550
3551 /* sanity */
3552 if (!bp->link_vars.link_up || !bp->port.pmf) {
3553 BNX2X_ERR("BUG!\n");
3554 return;
3555 }
3556
3557 bp->executer_idx = 0;
3558
3559 /* MCP */
3560 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3561 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3562 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3563#ifdef __BIG_ENDIAN
bb2a0f7a 3564 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3565#else
bb2a0f7a 3566 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3567#endif
3568 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3569 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3570
bb2a0f7a 3571 if (bp->port.port_stx) {
3572
3573 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3574 dmae->opcode = opcode;
3575 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3576 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3577 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3578 dmae->dst_addr_hi = 0;
3579 dmae->len = sizeof(struct host_port_stats) >> 2;
3580 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3581 dmae->comp_addr_hi = 0;
3582 dmae->comp_val = 1;
3583 }
3584
3585 if (bp->func_stx) {
3586
3587 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3588 dmae->opcode = opcode;
3589 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3590 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3591 dmae->dst_addr_lo = bp->func_stx >> 2;
3592 dmae->dst_addr_hi = 0;
3593 dmae->len = sizeof(struct host_func_stats) >> 2;
3594 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3595 dmae->comp_addr_hi = 0;
3596 dmae->comp_val = 1;
3597 }
3598
bb2a0f7a 3599 /* MAC */
3600 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3601 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3602 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3603#ifdef __BIG_ENDIAN
3604 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3605#else
3606 DMAE_CMD_ENDIANITY_DW_SWAP |
3607#endif
3608 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3609 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3610
c18487ee 3611 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3612
3613 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3614 NIG_REG_INGRESS_BMAC0_MEM);
3615
3616 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3617 BIGMAC_REGISTER_TX_STAT_GTBYT */
3618 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3619 dmae->opcode = opcode;
3620 dmae->src_addr_lo = (mac_addr +
3621 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3622 dmae->src_addr_hi = 0;
3623 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3624 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3625 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3626 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3627 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3628 dmae->comp_addr_hi = 0;
3629 dmae->comp_val = 1;
3630
3631 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3632 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3633 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3634 dmae->opcode = opcode;
3635 dmae->src_addr_lo = (mac_addr +
3636 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3637 dmae->src_addr_hi = 0;
3638 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3639 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3640 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3641 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3642 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3643 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3644 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3645 dmae->comp_addr_hi = 0;
3646 dmae->comp_val = 1;
3647
c18487ee 3648 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3649
3650 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3651
3652 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3653 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3654 dmae->opcode = opcode;
3655 dmae->src_addr_lo = (mac_addr +
3656 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3657 dmae->src_addr_hi = 0;
3658 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3659 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3660 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3661 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3662 dmae->comp_addr_hi = 0;
3663 dmae->comp_val = 1;
3664
3665 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3666 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3667 dmae->opcode = opcode;
3668 dmae->src_addr_lo = (mac_addr +
3669 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3670 dmae->src_addr_hi = 0;
3671 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3672 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3673 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3674 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3675 dmae->len = 1;
3676 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3677 dmae->comp_addr_hi = 0;
3678 dmae->comp_val = 1;
3679
3680 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3681 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3682 dmae->opcode = opcode;
3683 dmae->src_addr_lo = (mac_addr +
3684 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3685 dmae->src_addr_hi = 0;
3686 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3687 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3688 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3689 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3690 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3691 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3692 dmae->comp_addr_hi = 0;
3693 dmae->comp_val = 1;
3694 }
3695
3696 /* NIG */
3697 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3698 dmae->opcode = opcode;
3699 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3700 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3701 dmae->src_addr_hi = 0;
3702 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3703 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3704 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3705 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3706 dmae->comp_addr_hi = 0;
3707 dmae->comp_val = 1;
3708
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3712 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3713 dmae->src_addr_hi = 0;
3714 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3715 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3716 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3717 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3718 dmae->len = (2*sizeof(u32)) >> 2;
3719 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3720 dmae->comp_addr_hi = 0;
3721 dmae->comp_val = 1;
3722
3723 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3724 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3725 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3726 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3727#ifdef __BIG_ENDIAN
3728 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3729#else
3730 DMAE_CMD_ENDIANITY_DW_SWAP |
3731#endif
3732 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3733 (vn << DMAE_CMD_E1HVN_SHIFT));
3734 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3735 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3736 dmae->src_addr_hi = 0;
3737 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3738 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3739 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3740 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3741 dmae->len = (2*sizeof(u32)) >> 2;
3742 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3743 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3744 dmae->comp_val = DMAE_COMP_VAL;
3745
3746 *stats_comp = 0;
3747}
3748
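/*
 * Non-PMF path: prepare a single DMAE command that publishes this
 * function's host_func_stats to its func_stx mailbox;
 * bnx2x_hw_stats_post() executes it on the INIT_DMAE_C channel.
 */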
bb2a0f7a 3749static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3750{
3751 struct dmae_command *dmae = &bp->stats_dmae;
3752 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3753
3754 /* sanity */
3755 if (!bp->func_stx) {
3756 BNX2X_ERR("BUG!\n");
3757 return;
3758 }
a2fbb9ea 3759
3760 bp->executer_idx = 0;
3761 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3762
3763 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3764 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3765 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3766#ifdef __BIG_ENDIAN
3767 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3768#else
3769 DMAE_CMD_ENDIANITY_DW_SWAP |
3770#endif
3771 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3772 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3773 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3774 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3775 dmae->dst_addr_lo = bp->func_stx >> 2;
3776 dmae->dst_addr_hi = 0;
3777 dmae->len = sizeof(struct host_func_stats) >> 2;
3778 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3779 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3780 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3781
3782 *stats_comp = 0;
3783}
a2fbb9ea 3784
3785static void bnx2x_stats_start(struct bnx2x *bp)
3786{
3787 if (bp->port.pmf)
3788 bnx2x_port_stats_init(bp);
3789
3790 else if (bp->func_stx)
3791 bnx2x_func_stats_init(bp);
3792
3793 bnx2x_hw_stats_post(bp);
3794 bnx2x_storm_stats_post(bp);
3795}
3796
3797static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3798{
3799 bnx2x_stats_comp(bp);
3800 bnx2x_stats_pmf_update(bp);
3801 bnx2x_stats_start(bp);
3802}
3803
3804static void bnx2x_stats_restart(struct bnx2x *bp)
3805{
3806 bnx2x_stats_comp(bp);
3807 bnx2x_stats_start(bp);
3808}
3809
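/*
 * The UPDATE_STAT64/UPDATE_EXTEND_STAT macros (defined earlier in
 * this file) fold the freshly DMAEd MAC counters into the mac_stx[1]
 * area as 64-bit hi/lo pairs, extending the narrower hardware
 * counters; the local 'diff' struct is their scratch space for
 * wrap-around handling.
 */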
3810static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3811{
3812 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3813 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3814 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3815 struct {
3816 u32 lo;
3817 u32 hi;
3818 } diff;
3819
3820 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3821 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3822 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3823 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3824 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3825 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3826 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3827 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3828 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3829 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3830 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3831 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3832 UPDATE_STAT64(tx_stat_gt127,
3833 tx_stat_etherstatspkts65octetsto127octets);
3834 UPDATE_STAT64(tx_stat_gt255,
3835 tx_stat_etherstatspkts128octetsto255octets);
3836 UPDATE_STAT64(tx_stat_gt511,
3837 tx_stat_etherstatspkts256octetsto511octets);
3838 UPDATE_STAT64(tx_stat_gt1023,
3839 tx_stat_etherstatspkts512octetsto1023octets);
3840 UPDATE_STAT64(tx_stat_gt1518,
3841 tx_stat_etherstatspkts1024octetsto1522octets);
3842 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3843 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3844 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3845 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3846 UPDATE_STAT64(tx_stat_gterr,
3847 tx_stat_dot3statsinternalmactransmiterrors);
3848 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3849
3850 estats->pause_frames_received_hi =
3851 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3852 estats->pause_frames_received_lo =
3853 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3854
3855 estats->pause_frames_sent_hi =
3856 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3857 estats->pause_frames_sent_lo =
3858 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3859}
3860
3861static void bnx2x_emac_stats_update(struct bnx2x *bp)
3862{
3863 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3864 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3865 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3866
3867 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3868 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3869 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3870 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3871 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3872 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3873 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3874 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3875 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3876 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3877 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3878 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3879 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3880 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3881 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3882 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3883 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3884 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3885 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3886 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3887 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3888 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3889 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3890 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3891 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3892 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3893 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3894 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3895 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3896 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3897 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3898
3899 estats->pause_frames_received_hi =
3900 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3901 estats->pause_frames_received_lo =
3902 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3903 ADD_64(estats->pause_frames_received_hi,
3904 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3905 estats->pause_frames_received_lo,
3906 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3907
3908 estats->pause_frames_sent_hi =
3909 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3910 estats->pause_frames_sent_lo =
3911 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3912 ADD_64(estats->pause_frames_sent_hi,
3913 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3914 estats->pause_frames_sent_lo,
3915 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3916}
3917
3918static int bnx2x_hw_stats_update(struct bnx2x *bp)
3919{
3920 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3921 struct nig_stats *old = &(bp->port.old_nig_stats);
3922 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3923 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3924 struct {
3925 u32 lo;
3926 u32 hi;
3927 } diff;
de832a55 3928 u32 nig_timer_max;
3929
3930 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3931 bnx2x_bmac_stats_update(bp);
3932
3933 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3934 bnx2x_emac_stats_update(bp);
3935
3936 else { /* unreached */
c3eefaf6 3937 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3938 return -1;
3939 }
a2fbb9ea 3940
3941 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3942 new->brb_discard - old->brb_discard);
3943 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3944 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3945
3946 UPDATE_STAT64_NIG(egress_mac_pkt0,
3947 etherstatspkts1024octetsto1522octets);
3948 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3949
bb2a0f7a 3950 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3951
3952 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3953 sizeof(struct mac_stx));
3954 estats->brb_drop_hi = pstats->brb_drop_hi;
3955 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3956
bb2a0f7a 3957 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3958
3959 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3960 if (nig_timer_max != estats->nig_timer_max) {
3961 estats->nig_timer_max = nig_timer_max;
3962 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3963 }
3964
bb2a0f7a 3965 return 0;
3966}
3967
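/*
 * Each storm (t/u/x) tags its per-client statistics block with a
 * stats_counter sequence number.  A block is consumed only when its
 * sequence matches the driver's expected bp->stats_counter;
 * otherwise the whole update is skipped for this round, and
 * bnx2x_stats_update() panics after several consecutive misses.
 */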
bb2a0f7a 3968static int bnx2x_storm_stats_update(struct bnx2x *bp)
3969{
3970 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3971 struct tstorm_per_port_stats *tport =
de832a55 3972 &stats->tstorm_common.port_statistics;
3973 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3974 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3975 int i;
3976
3977 memset(&(fstats->total_bytes_received_hi), 0,
3978 sizeof(struct host_func_stats) - 2*sizeof(u32));
3979 estats->error_bytes_received_hi = 0;
3980 estats->error_bytes_received_lo = 0;
3981 estats->etherstatsoverrsizepkts_hi = 0;
3982 estats->etherstatsoverrsizepkts_lo = 0;
3983 estats->no_buff_discard_hi = 0;
3984 estats->no_buff_discard_lo = 0;
a2fbb9ea 3985
ca00392c 3986 for_each_rx_queue(bp, i) {
3987 struct bnx2x_fastpath *fp = &bp->fp[i];
3988 int cl_id = fp->cl_id;
3989 struct tstorm_per_client_stats *tclient =
3990 &stats->tstorm_common.client_statistics[cl_id];
3991 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3992 struct ustorm_per_client_stats *uclient =
3993 &stats->ustorm_common.client_statistics[cl_id];
3994 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3995 struct xstorm_per_client_stats *xclient =
3996 &stats->xstorm_common.client_statistics[cl_id];
3997 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3998 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3999 u32 diff;
4000
4001 /* are storm stats valid? */
4002 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4003 bp->stats_counter) {
4004 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4005 " xstorm counter (%d) != stats_counter (%d)\n",
4006 i, xclient->stats_counter, bp->stats_counter);
4007 return -1;
4008 }
4009 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4010 bp->stats_counter) {
4011 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4012 " tstorm counter (%d) != stats_counter (%d)\n",
4013 i, tclient->stats_counter, bp->stats_counter);
4014 return -2;
4015 }
4016 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4017 bp->stats_counter) {
4018 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4019 " ustorm counter (%d) != stats_counter (%d)\n",
4020 i, uclient->stats_counter, bp->stats_counter);
4021 return -4;
4022 }
a2fbb9ea 4023
de832a55 4024 qstats->total_bytes_received_hi =
ca00392c 4025 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4026 qstats->total_bytes_received_lo =
4027 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4028
4029 ADD_64(qstats->total_bytes_received_hi,
4030 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4031 qstats->total_bytes_received_lo,
4032 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4033
4034 ADD_64(qstats->total_bytes_received_hi,
4035 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4036 qstats->total_bytes_received_lo,
4037 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4038
4039 qstats->valid_bytes_received_hi =
4040 qstats->total_bytes_received_hi;
de832a55 4041 qstats->valid_bytes_received_lo =
ca00392c 4042 qstats->total_bytes_received_lo;
bb2a0f7a 4043
de832a55 4044 qstats->error_bytes_received_hi =
bb2a0f7a 4045 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4046 qstats->error_bytes_received_lo =
bb2a0f7a 4047 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4048
4049 ADD_64(qstats->total_bytes_received_hi,
4050 qstats->error_bytes_received_hi,
4051 qstats->total_bytes_received_lo,
4052 qstats->error_bytes_received_lo);
4053
4054 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4055 total_unicast_packets_received);
4056 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4057 total_multicast_packets_received);
4058 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4059 total_broadcast_packets_received);
4060 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4061 etherstatsoverrsizepkts);
4062 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4063
4064 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4065 total_unicast_packets_received);
4066 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4067 total_multicast_packets_received);
4068 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4069 total_broadcast_packets_received);
4070 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4071 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4072 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4073
4074 qstats->total_bytes_transmitted_hi =
ca00392c 4075 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4076 qstats->total_bytes_transmitted_lo =
4077 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4078
4079 ADD_64(qstats->total_bytes_transmitted_hi,
4080 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4081 qstats->total_bytes_transmitted_lo,
4082 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4083
4084 ADD_64(qstats->total_bytes_transmitted_hi,
4085 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4086 qstats->total_bytes_transmitted_lo,
4087 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4088
4089 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4090 total_unicast_packets_transmitted);
4091 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4092 total_multicast_packets_transmitted);
4093 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4094 total_broadcast_packets_transmitted);
4095
4096 old_tclient->checksum_discard = tclient->checksum_discard;
4097 old_tclient->ttl0_discard = tclient->ttl0_discard;
4098
4099 ADD_64(fstats->total_bytes_received_hi,
4100 qstats->total_bytes_received_hi,
4101 fstats->total_bytes_received_lo,
4102 qstats->total_bytes_received_lo);
4103 ADD_64(fstats->total_bytes_transmitted_hi,
4104 qstats->total_bytes_transmitted_hi,
4105 fstats->total_bytes_transmitted_lo,
4106 qstats->total_bytes_transmitted_lo);
4107 ADD_64(fstats->total_unicast_packets_received_hi,
4108 qstats->total_unicast_packets_received_hi,
4109 fstats->total_unicast_packets_received_lo,
4110 qstats->total_unicast_packets_received_lo);
4111 ADD_64(fstats->total_multicast_packets_received_hi,
4112 qstats->total_multicast_packets_received_hi,
4113 fstats->total_multicast_packets_received_lo,
4114 qstats->total_multicast_packets_received_lo);
4115 ADD_64(fstats->total_broadcast_packets_received_hi,
4116 qstats->total_broadcast_packets_received_hi,
4117 fstats->total_broadcast_packets_received_lo,
4118 qstats->total_broadcast_packets_received_lo);
4119 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4120 qstats->total_unicast_packets_transmitted_hi,
4121 fstats->total_unicast_packets_transmitted_lo,
4122 qstats->total_unicast_packets_transmitted_lo);
4123 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4124 qstats->total_multicast_packets_transmitted_hi,
4125 fstats->total_multicast_packets_transmitted_lo,
4126 qstats->total_multicast_packets_transmitted_lo);
4127 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4128 qstats->total_broadcast_packets_transmitted_hi,
4129 fstats->total_broadcast_packets_transmitted_lo,
4130 qstats->total_broadcast_packets_transmitted_lo);
4131 ADD_64(fstats->valid_bytes_received_hi,
4132 qstats->valid_bytes_received_hi,
4133 fstats->valid_bytes_received_lo,
4134 qstats->valid_bytes_received_lo);
4135
4136 ADD_64(estats->error_bytes_received_hi,
4137 qstats->error_bytes_received_hi,
4138 estats->error_bytes_received_lo,
4139 qstats->error_bytes_received_lo);
4140 ADD_64(estats->etherstatsoverrsizepkts_hi,
4141 qstats->etherstatsoverrsizepkts_hi,
4142 estats->etherstatsoverrsizepkts_lo,
4143 qstats->etherstatsoverrsizepkts_lo);
4144 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4145 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4146 }
4147
4148 ADD_64(fstats->total_bytes_received_hi,
4149 estats->rx_stat_ifhcinbadoctets_hi,
4150 fstats->total_bytes_received_lo,
4151 estats->rx_stat_ifhcinbadoctets_lo);
4152
4153 memcpy(estats, &(fstats->total_bytes_received_hi),
4154 sizeof(struct host_func_stats) - 2*sizeof(u32));
4155
4156 ADD_64(estats->etherstatsoverrsizepkts_hi,
4157 estats->rx_stat_dot3statsframestoolong_hi,
4158 estats->etherstatsoverrsizepkts_lo,
4159 estats->rx_stat_dot3statsframestoolong_lo);
4160 ADD_64(estats->error_bytes_received_hi,
4161 estats->rx_stat_ifhcinbadoctets_hi,
4162 estats->error_bytes_received_lo,
4163 estats->rx_stat_ifhcinbadoctets_lo);
4164
4165 if (bp->port.pmf) {
4166 estats->mac_filter_discard =
4167 le32_to_cpu(tport->mac_filter_discard);
4168 estats->xxoverflow_discard =
4169 le32_to_cpu(tport->xxoverflow_discard);
4170 estats->brb_truncate_discard =
bb2a0f7a 4171 le32_to_cpu(tport->brb_truncate_discard);
4172 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4173 }
4174
4175 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4176
4177 bp->stats_pending = 0;
4178
4179 return 0;
4180}
4181
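/*
 * Fold the accumulated 64-bit hi/lo pairs into the generic
 * net_device_stats counters.  bnx2x_hilo() (defined earlier in this
 * file) roughly does:
 *
 *	64-bit kernel:	return ((u64)*hiref << 32) | *(hiref + 1);
 *	32-bit kernel:	return *(hiref + 1);	(low half only)
 */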
bb2a0f7a 4182static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4183{
bb2a0f7a 4184 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4185 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4186 int i;
4187
4188 nstats->rx_packets =
4189 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4190 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4191 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4192
4193 nstats->tx_packets =
4194 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4195 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4196 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4197
de832a55 4198 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4199
0e39e645 4200 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4201
de832a55 4202 nstats->rx_dropped = estats->mac_discard;
ca00392c 4203 for_each_rx_queue(bp, i)
4204 nstats->rx_dropped +=
4205 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4206
4207 nstats->tx_dropped = 0;
4208
4209 nstats->multicast =
de832a55 4210 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4211
bb2a0f7a 4212 nstats->collisions =
de832a55 4213 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4214
4215 nstats->rx_length_errors =
4216 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4217 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4218 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4219 bnx2x_hilo(&estats->brb_truncate_hi);
4220 nstats->rx_crc_errors =
4221 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4222 nstats->rx_frame_errors =
4223 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4224 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4225 nstats->rx_missed_errors = estats->xxoverflow_discard;
4226
4227 nstats->rx_errors = nstats->rx_length_errors +
4228 nstats->rx_over_errors +
4229 nstats->rx_crc_errors +
4230 nstats->rx_frame_errors +
4231 nstats->rx_fifo_errors +
4232 nstats->rx_missed_errors;
a2fbb9ea 4233
bb2a0f7a 4234 nstats->tx_aborted_errors =
4235 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4236 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4237 nstats->tx_carrier_errors =
4238 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4239 nstats->tx_fifo_errors = 0;
4240 nstats->tx_heartbeat_errors = 0;
4241 nstats->tx_window_errors = 0;
4242
4243 nstats->tx_errors = nstats->tx_aborted_errors +
4244 nstats->tx_carrier_errors +
4245 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4246}
4247
4248static void bnx2x_drv_stats_update(struct bnx2x *bp)
4249{
4250 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4251 int i;
4252
4253 estats->driver_xoff = 0;
4254 estats->rx_err_discard_pkt = 0;
4255 estats->rx_skb_alloc_failed = 0;
4256 estats->hw_csum_err = 0;
ca00392c 4257 for_each_rx_queue(bp, i) {
4258 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4259
4260 estats->driver_xoff += qstats->driver_xoff;
4261 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4262 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4263 estats->hw_csum_err += qstats->hw_csum_err;
4264 }
4265}
4266
bb2a0f7a 4267static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4268{
bb2a0f7a 4269 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4270
4271 if (*stats_comp != DMAE_COMP_VAL)
4272 return;
4273
4274 if (bp->port.pmf)
de832a55 4275 bnx2x_hw_stats_update(bp);
a2fbb9ea 4276
4277 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4278 BNX2X_ERR("storm stats were not updated for 3 times\n");
4279 bnx2x_panic();
4280 return;
4281 }
4282
4283 bnx2x_net_stats_update(bp);
4284 bnx2x_drv_stats_update(bp);
4285
a2fbb9ea 4286 if (bp->msglevel & NETIF_MSG_TIMER) {
4287 struct bnx2x_fastpath *fp0_rx = bp->fp;
4288 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4289 struct tstorm_per_client_stats *old_tclient =
4290 &bp->fp->old_tclient;
4291 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4292 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4293 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4294 int i;
4295
4296 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4297 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4298 " tx pkt (%lx)\n",
4299 bnx2x_tx_avail(fp0_tx),
4300 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4301 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4302 " rx pkt (%lx)\n",
4303 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4304 fp0_rx->rx_comp_cons),
4305 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4306 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4307 "brb truncate %u\n",
4308 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4309 qstats->driver_xoff,
4310 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4311 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4312 "packets_too_big_discard %lu no_buff_discard %lu "
4313 "mac_discard %u mac_filter_discard %u "
4314 "xxovrflow_discard %u brb_truncate_discard %u "
4315 "ttl0_discard %u\n",
4781bfad 4316 le32_to_cpu(old_tclient->checksum_discard),
4317 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4318 bnx2x_hilo(&qstats->no_buff_discard_hi),
4319 estats->mac_discard, estats->mac_filter_discard,
4320 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4321 le32_to_cpu(old_tclient->ttl0_discard));
4322
4323 for_each_queue(bp, i) {
4324 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4325 bnx2x_fp(bp, i, tx_pkt),
4326 bnx2x_fp(bp, i, rx_pkt),
4327 bnx2x_fp(bp, i, rx_calls));
4328 }
4329 }
4330
4331 bnx2x_hw_stats_post(bp);
4332 bnx2x_storm_stats_post(bp);
4333}
a2fbb9ea 4334
4335static void bnx2x_port_stats_stop(struct bnx2x *bp)
4336{
4337 struct dmae_command *dmae;
4338 u32 opcode;
4339 int loader_idx = PMF_DMAE_C(bp);
4340 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4341
bb2a0f7a 4342 bp->executer_idx = 0;
a2fbb9ea 4343
4344 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4345 DMAE_CMD_C_ENABLE |
4346 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4347#ifdef __BIG_ENDIAN
bb2a0f7a 4348 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4349#else
bb2a0f7a 4350 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4351#endif
4352 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4353 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4354
4355 if (bp->port.port_stx) {
4356
4357 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4358 if (bp->func_stx)
4359 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4360 else
4361 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4362 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4363 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4364 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4365 dmae->dst_addr_hi = 0;
4366 dmae->len = sizeof(struct host_port_stats) >> 2;
4367 if (bp->func_stx) {
4368 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4369 dmae->comp_addr_hi = 0;
4370 dmae->comp_val = 1;
4371 } else {
4372 dmae->comp_addr_lo =
4373 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4374 dmae->comp_addr_hi =
4375 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4376 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4377
4378 *stats_comp = 0;
4379 }
4380 }
4381
4382 if (bp->func_stx) {
4383
4384 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4385 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4386 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4387 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4388 dmae->dst_addr_lo = bp->func_stx >> 2;
4389 dmae->dst_addr_hi = 0;
4390 dmae->len = sizeof(struct host_func_stats) >> 2;
4391 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4392 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4393 dmae->comp_val = DMAE_COMP_VAL;
4394
4395 *stats_comp = 0;
a2fbb9ea 4396 }
4397}
4398
4399static void bnx2x_stats_stop(struct bnx2x *bp)
4400{
4401 int update = 0;
4402
4403 bnx2x_stats_comp(bp);
4404
4405 if (bp->port.pmf)
4406 update = (bnx2x_hw_stats_update(bp) == 0);
4407
4408 update |= (bnx2x_storm_stats_update(bp) == 0);
4409
4410 if (update) {
4411 bnx2x_net_stats_update(bp);
a2fbb9ea 4412
4413 if (bp->port.pmf)
4414 bnx2x_port_stats_stop(bp);
4415
4416 bnx2x_hw_stats_post(bp);
4417 bnx2x_stats_comp(bp);
4418 }
4419}
4420
4421static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4422{
4423}
4424
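/*
 * Statistics state machine: indexed by [current state][event], each
 * cell gives the action to run and the next state.  The events are
 * PMF (PMF status change), LINK_UP, UPDATE (periodic timer) and
 * STOP; bnx2x_stats_handle() below performs the transition.
 */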
4425static const struct {
4426 void (*action)(struct bnx2x *bp);
4427 enum bnx2x_stats_state next_state;
4428} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4429/* state event */
4430{
4431/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4432/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4433/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4434/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4435},
4436{
4437/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4438/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4439/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4440/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4441}
4442};
4443
4444static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4445{
4446 enum bnx2x_stats_state state = bp->stats_state;
4447
4448 bnx2x_stats_stm[state][event].action(bp);
4449 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4450
4451 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4452 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4453 state, event, bp->stats_state);
4454}
4455
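/*
 * The periodic timer also drives the driver<->MCP heartbeat: the
 * driver advances fw_drv_pulse_wr_seq (wrapped by DRV_PULSE_SEQ_MASK)
 * and writes it to its function mailbox, and the MCP echoes the
 * sequence back.  E.g. after the driver writes 0x1234, a healthy echo
 * is 0x1234 (already seen) or 0x1233 (echo still pending).
 */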
4456static void bnx2x_timer(unsigned long data)
4457{
4458 struct bnx2x *bp = (struct bnx2x *) data;
4459
4460 if (!netif_running(bp->dev))
4461 return;
4462
4463 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4464 goto timer_restart;
4465
4466 if (poll) {
4467 struct bnx2x_fastpath *fp = &bp->fp[0];
4468 int rc;
4469
7961f791 4470 bnx2x_tx_int(fp);
4471 rc = bnx2x_rx_int(fp, 1000);
4472 }
4473
4474 if (!BP_NOMCP(bp)) {
4475 int func = BP_FUNC(bp);
4476 u32 drv_pulse;
4477 u32 mcp_pulse;
4478
4479 ++bp->fw_drv_pulse_wr_seq;
4480 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4481 /* TBD - add SYSTEM_TIME */
4482 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4483 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4484
34f80b04 4485 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4486 MCP_PULSE_SEQ_MASK);
4487 /* The delta between driver pulse and mcp response
4488 * should be 1 (before mcp response) or 0 (after mcp response)
4489 */
4490 if ((drv_pulse != mcp_pulse) &&
4491 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4492 /* someone lost a heartbeat... */
4493 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4494 drv_pulse, mcp_pulse);
4495 }
4496 }
4497
4498 if ((bp->state == BNX2X_STATE_OPEN) ||
4499 (bp->state == BNX2X_STATE_DISABLED))
4500 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4501
f1410647 4502timer_restart:
4503 mod_timer(&bp->timer, jiffies + bp->current_interval);
4504}
4505
4506/* end of Statistics */
4507
4508/* nic init */
4509
4510/*
4511 * nic init service functions
4512 */
4513
34f80b04 4514static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4515{
4516 int port = BP_PORT(bp);
4517
4518 /* "CSTORM" */
4519 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4520 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4521 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4522 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4523 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4524 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4525}
4526
4527static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4528 dma_addr_t mapping, int sb_id)
4529{
4530 int port = BP_PORT(bp);
bb2a0f7a 4531 int func = BP_FUNC(bp);
a2fbb9ea 4532 int index;
34f80b04 4533 u64 section;
4534
4535 /* USTORM */
4536 section = ((u64)mapping) + offsetof(struct host_status_block,
4537 u_status_block);
34f80b04 4538 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4539
4540 REG_WR(bp, BAR_CSTRORM_INTMEM +
4541 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4542 REG_WR(bp, BAR_CSTRORM_INTMEM +
4543 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4544 U64_HI(section));
4545 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4546 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4547
4548 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4549 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4550 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4551
4552 /* CSTORM */
4553 section = ((u64)mapping) + offsetof(struct host_status_block,
4554 c_status_block);
34f80b04 4555 sb->c_status_block.status_block_id = sb_id;
4556
4557 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4558 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4559 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4560 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4561 U64_HI(section));
7a9b2557 4562 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4563 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4564
4565 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4566 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4567 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4568
4569 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4570}
4571
4572static void bnx2x_zero_def_sb(struct bnx2x *bp)
4573{
4574 int func = BP_FUNC(bp);
a2fbb9ea 4575
ca00392c 4576 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4577 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4578 sizeof(struct tstorm_def_status_block)/4);
4579 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4580 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4581 sizeof(struct cstorm_def_status_block_u)/4);
4582 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4583 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4584 sizeof(struct cstorm_def_status_block_c)/4);
4585 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4586 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4587 sizeof(struct xstorm_def_status_block)/4);
4588}
4589
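/*
 * The default status block carries the attention bits plus one index
 * set per storm.  Besides programming each storm's view of it, this
 * routine caches the four signal masks of every attention group from
 * the AEU registers for later attention handling.
 */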
4590static void bnx2x_init_def_sb(struct bnx2x *bp,
4591 struct host_def_status_block *def_sb,
34f80b04 4592 dma_addr_t mapping, int sb_id)
a2fbb9ea 4593{
4594 int port = BP_PORT(bp);
4595 int func = BP_FUNC(bp);
4596 int index, val, reg_offset;
4597 u64 section;
4598
4599 /* ATTN */
4600 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4601 atten_status_block);
34f80b04 4602 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4603
4604 bp->attn_state = 0;
4605
4606 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4607 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4608
34f80b04 4609 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4610 bp->attn_group[index].sig[0] = REG_RD(bp,
4611 reg_offset + 0x10*index);
4612 bp->attn_group[index].sig[1] = REG_RD(bp,
4613 reg_offset + 0x4 + 0x10*index);
4614 bp->attn_group[index].sig[2] = REG_RD(bp,
4615 reg_offset + 0x8 + 0x10*index);
4616 bp->attn_group[index].sig[3] = REG_RD(bp,
4617 reg_offset + 0xc + 0x10*index);
4618 }
4619
4620 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4621 HC_REG_ATTN_MSG0_ADDR_L);
4622
4623 REG_WR(bp, reg_offset, U64_LO(section));
4624 REG_WR(bp, reg_offset + 4, U64_HI(section));
4625
4626 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4627
4628 val = REG_RD(bp, reg_offset);
34f80b04 4629 val |= sb_id;
4630 REG_WR(bp, reg_offset, val);
4631
4632 /* USTORM */
4633 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4634 u_def_status_block);
34f80b04 4635 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4636
4637 REG_WR(bp, BAR_CSTRORM_INTMEM +
4638 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4639 REG_WR(bp, BAR_CSTRORM_INTMEM +
4640 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4641 U64_HI(section));
4642 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4643 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4644
4645 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4646 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4647 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4648
4649 /* CSTORM */
4650 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4651 c_def_status_block);
34f80b04 4652 def_sb->c_def_status_block.status_block_id = sb_id;
4653
4654 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4655 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4656 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4657 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4658 U64_HI(section));
5c862848 4659 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4660 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4661
4662 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4663 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4664 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4665
4666 /* TSTORM */
4667 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4668 t_def_status_block);
34f80b04 4669 def_sb->t_def_status_block.status_block_id = sb_id;
4670
4671 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4672 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4673 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4674 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4675 U64_HI(section));
5c862848 4676 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4677 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4678
4679 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4680 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4681 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4682
4683 /* XSTORM */
4684 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4685 x_def_status_block);
34f80b04 4686 def_sb->x_def_status_block.status_block_id = sb_id;
4687
4688 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4689 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4690 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4691 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4692 U64_HI(section));
5c862848 4693 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4694 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4695
4696 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4697 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4698 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4699
bb2a0f7a 4700 bp->stats_pending = 0;
66e855f3 4701 bp->set_mac_pending = 0;
bb2a0f7a 4702
34f80b04 4703 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4704}
4705
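/*
 * Program the interrupt coalescing parameters: rx_ticks/tx_ticks are
 * in microseconds while the HC timeout fields apparently count in
 * 12 us units, hence the /12; a timeout that works out to zero also
 * sets the per-index HC "disable" flag.
 */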
4706static void bnx2x_update_coalesce(struct bnx2x *bp)
4707{
34f80b04 4708 int port = BP_PORT(bp);
4709 int i;
4710
4711 for_each_queue(bp, i) {
34f80b04 4712 int sb_id = bp->fp[i].sb_id;
4713
4714 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4715 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4716 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4717 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4718 bp->rx_ticks/12);
4719 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4720 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4721 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4722 (bp->rx_ticks/12) ? 0 : 1);
4723
4724 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4725 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4726 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4727 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4728 bp->tx_ticks/12);
a2fbb9ea 4729 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4730 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4731 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4732 (bp->tx_ticks/12) ? 0 : 1);
4733 }
4734}
4735
4736static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4737 struct bnx2x_fastpath *fp, int last)
4738{
4739 int i;
4740
4741 for (i = 0; i < last; i++) {
4742 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4743 struct sk_buff *skb = rx_buf->skb;
4744
4745 if (skb == NULL) {
4746 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4747 continue;
4748 }
4749
4750 if (fp->tpa_state[i] == BNX2X_TPA_START)
4751 pci_unmap_single(bp->pdev,
4752 pci_unmap_addr(rx_buf, mapping),
356e2385 4753 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4754
4755 dev_kfree_skb(skb);
4756 rx_buf->skb = NULL;
4757 }
4758}
4759
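/*
 * Rx ring initialization.  Every BD/SGE/CQE ring is a circle of
 * BCM_PAGE_SIZE pages whose trailing descriptor(s) point at the next
 * page, so producer/consumer indices can simply wrap.  With TPA (LRO)
 * enabled, a pool of max_agg_queues skbs is pre-allocated per queue
 * for aggregation; on allocation failure TPA is disabled for that
 * queue only.
 */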
4760static void bnx2x_init_rx_rings(struct bnx2x *bp)
4761{
7a9b2557 4762 int func = BP_FUNC(bp);
4763 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4764 ETH_MAX_AGGREGATION_QUEUES_E1H;
4765 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4766 int i, j;
a2fbb9ea 4767
87942b46 4768 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4769 DP(NETIF_MSG_IFUP,
4770 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4771
7a9b2557 4772 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4773
555f6c78 4774 for_each_rx_queue(bp, j) {
32626230 4775 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4776
32626230 4777 for (i = 0; i < max_agg_queues; i++) {
4778 fp->tpa_pool[i].skb =
4779 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4780 if (!fp->tpa_pool[i].skb) {
4781 BNX2X_ERR("Failed to allocate TPA "
4782 "skb pool for queue[%d] - "
4783 "disabling TPA on this "
4784 "queue!\n", j);
4785 bnx2x_free_tpa_pool(bp, fp, i);
4786 fp->disable_tpa = 1;
4787 break;
4788 }
4789 pci_unmap_addr_set((struct sw_rx_bd *)
4790 &bp->fp->tpa_pool[i],
4791 mapping, 0);
4792 fp->tpa_state[i] = BNX2X_TPA_STOP;
4793 }
4794 }
4795 }
4796
555f6c78 4797 for_each_rx_queue(bp, j) {
4798 struct bnx2x_fastpath *fp = &bp->fp[j];
4799
4800 fp->rx_bd_cons = 0;
4801 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4802 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4803
4804 /* Mark queue as Rx */
4805 fp->is_rx_queue = 1;
4806
4807 /* "next page" elements initialization */
4808 /* SGE ring */
4809 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4810 struct eth_rx_sge *sge;
4811
4812 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4813 sge->addr_hi =
4814 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4815 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4816 sge->addr_lo =
4817 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4818 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4819 }
4820
4821 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4822
7a9b2557 4823 /* RX BD ring */
4824 for (i = 1; i <= NUM_RX_RINGS; i++) {
4825 struct eth_rx_bd *rx_bd;
4826
4827 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4828 rx_bd->addr_hi =
4829 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4830 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4831 rx_bd->addr_lo =
4832 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4833 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4834 }
4835
34f80b04 4836 /* CQ ring */
4837 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4838 struct eth_rx_cqe_next_page *nextpg;
4839
4840 nextpg = (struct eth_rx_cqe_next_page *)
4841 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4842 nextpg->addr_hi =
4843 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4844 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4845 nextpg->addr_lo =
4846 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4847 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4848 }
4849
4850 /* Allocate SGEs and initialize the ring elements */
4851 for (i = 0, ring_prod = 0;
4852 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4853
4854 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4855 BNX2X_ERR("was only able to allocate "
4856 "%d rx sges\n", i);
4857 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4858 /* Cleanup already allocated elements */
4859 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4860 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4861 fp->disable_tpa = 1;
4862 ring_prod = 0;
4863 break;
4864 }
4865 ring_prod = NEXT_SGE_IDX(ring_prod);
4866 }
4867 fp->rx_sge_prod = ring_prod;
4868
4869 /* Allocate BDs and initialize BD ring */
66e855f3 4870 fp->rx_comp_cons = 0;
7a9b2557 4871 cqe_ring_prod = ring_prod = 0;
4872 for (i = 0; i < bp->rx_ring_size; i++) {
4873 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4874 BNX2X_ERR("was only able to allocate "
4875 "%d rx skbs on queue[%d]\n", i, j);
4876 fp->eth_q_stats.rx_skb_alloc_failed++;
4877 break;
4878 }
4879 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4880 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4881 WARN_ON(ring_prod <= i);
4882 }
4883
4884 fp->rx_bd_prod = ring_prod;
4885 /* must not have more available CQEs than BDs */
4886 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4887 cqe_ring_prod);
4888 fp->rx_pkt = fp->rx_calls = 0;
4889
4890 /* Warning!
4891 * This will generate an interrupt (to the TSTORM);
4892 * it must only be done after the chip is initialized.
4893 */
4894 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4895 fp->rx_sge_prod);
4896 if (j != 0)
4897 continue;
4898
4899 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4900 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4901 U64_LO(fp->rx_comp_mapping));
4902 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4903 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4904 U64_HI(fp->rx_comp_mapping));
4905 }
4906}
4907
4908static void bnx2x_init_tx_ring(struct bnx2x *bp)
4909{
4910 int i, j;
4911
555f6c78 4912 for_each_tx_queue(bp, j) {
4913 struct bnx2x_fastpath *fp = &bp->fp[j];
4914
4915 for (i = 1; i <= NUM_TX_RINGS; i++) {
4916 struct eth_tx_next_bd *tx_next_bd =
4917 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 4918
ca00392c 4919 tx_next_bd->addr_hi =
a2fbb9ea 4920 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4921 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 4922 tx_next_bd->addr_lo =
a2fbb9ea 4923 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4924 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4925 }
4926
4927 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4928 fp->tx_db.data.zero_fill1 = 0;
4929 fp->tx_db.data.prod = 0;
4930
4931 fp->tx_pkt_prod = 0;
4932 fp->tx_pkt_cons = 0;
4933 fp->tx_bd_prod = 0;
4934 fp->tx_bd_cons = 0;
4935 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4936 fp->tx_pkt = 0;
4937 }
4938}
4939
4940static void bnx2x_init_sp_ring(struct bnx2x *bp)
4941{
34f80b04 4942 int func = BP_FUNC(bp);
4943
4944 spin_lock_init(&bp->spq_lock);
4945
4946 bp->spq_left = MAX_SPQ_PENDING;
4947 bp->spq_prod_idx = 0;
4948 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4949 bp->spq_prod_bd = bp->spq;
4950 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4951
34f80b04 4952 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4953 U64_LO(bp->spq_mapping));
4954 REG_WR(bp,
4955 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4956 U64_HI(bp->spq_mapping));
4957
34f80b04 4958 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4959 bp->spq_prod_idx);
4960}
4961
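/*
 * Per-connection ETH context setup.  Rx queues get a USTORM context
 * (client id, status block, BD/SGE page bases, TPA flags when
 * enabled); Tx queues get CSTORM/XSTORM context pointing at their BD
 * ring.  Note that the Tx contexts reuse the context array entries
 * past the Rx ones (index i - bp->num_rx_queues).
 */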
4962static void bnx2x_init_context(struct bnx2x *bp)
4963{
4964 int i;
4965
ca00392c 4966 for_each_rx_queue(bp, i) {
4967 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4968 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4969 u8 cl_id = fp->cl_id;
a2fbb9ea 4970
4971 context->ustorm_st_context.common.sb_index_numbers =
4972 BNX2X_RX_SB_INDEX_NUM;
0626b899 4973 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 4974 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 4975 context->ustorm_st_context.common.flags =
de832a55
EG
4976 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4977 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4978 context->ustorm_st_context.common.statistics_counter_id =
4979 cl_id;
8d9c5f34 4980 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4981 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4982 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4983 bp->rx_buf_size;
34f80b04 4984 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4985 U64_HI(fp->rx_desc_mapping);
34f80b04 4986 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4987 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4988 if (!fp->disable_tpa) {
4989 context->ustorm_st_context.common.flags |=
ca00392c 4990 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 4991 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4992 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4993 (u32)0xffff);
7a9b2557
VZ
4994 context->ustorm_st_context.common.sge_page_base_hi =
4995 U64_HI(fp->rx_sge_mapping);
4996 context->ustorm_st_context.common.sge_page_base_lo =
4997 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
4998
4999 context->ustorm_st_context.common.max_sges_for_packet =
5000 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5001 context->ustorm_st_context.common.max_sges_for_packet =
5002 ((context->ustorm_st_context.common.
5003 max_sges_for_packet + PAGES_PER_SGE - 1) &
5004 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
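
			/* e.g. assuming a 4K SGE page, PAGES_PER_SGE == 2
			 * and mtu == 9000: 9000 bytes align up to 3 pages,
			 * which round up to 4 (a multiple of PAGES_PER_SGE)
			 * and give 4 >> 1 = 2 SGEs per packet.
			 */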
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_rx_queues));
}
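
/* The RSS indirection table simply round-robins client IDs over the Rx
 * queues: entry i maps to (leading cl_id + i % num_rx_queues).  For example,
 * with a leading cl_id of 0 and 4 Rx queues the table would read
 * 0, 1, 2, 3, 0, 1, ... for all TSTORM_INDIRECTION_TABLE_SIZE entries.
 */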

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
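
/* Summary of the per-mode filter settings written above (one bit per
 * function in each mask):
 *
 *   NONE     - drop all unicast, multicast and broadcast
 *   NORMAL   - accept all broadcast; unicast/multicast are presumably left
 *              to the configured MAC and multicast filters
 *   ALLMULTI - accept all multicast and broadcast
 *   PROMISC  - accept everything, including management unicast via the
 *              NIG LLH mask
 */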

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
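	/* e.g. assuming a 4K SGE page and PAGES_PER_SGE == 2, the inner
	 * min() gives 8 frags and 8 * 4096 * 2 = 65536, which the outer
	 * min() clamps to the 0xffff that fits the u16 register below.
	 */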
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
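
/* The switch above falls through deliberately: a COMMON load initializes
 * the common, port and function parts; a PORT load initializes the port
 * and function parts; a FUNCTION load initializes only the function part.
 */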

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));
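
	/* Per RFC 1952 a gzip member starts with a 10-byte fixed header:
	 * magic 0x1f 0x8b, the compression method (8 = deflate), a flag
	 * byte, mtime, XFL and OS.  If the FNAME flag (bit 3) is set, a
	 * NUL-terminated original file name follows, which the loop above
	 * skips before handing the raw deflate stream to zlib_inflate().
	 */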

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
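
/* Typical call sequence (as used by bnx2x_init_hw() below): allocate the
 * DMA-able output buffer once with bnx2x_gunzip_init(), decompress each
 * firmware blob with bnx2x_gunzip() - the result lands in bp->gunzip_buf
 * as bp->gunzip_outlen 32-bit words - and release everything with
 * bnx2x_gunzip_end().
 */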

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
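
	/* e.g. for a two-port device at mtu == 9000 the formula above gives
	 * low = 96 + 140 + 1 = 237 and high = 237 + 56 = 293, in units of
	 * 256 bytes per the (24*1024 + val*4)/256 comment.
	 */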

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* The phys address is shifted right 12 bits and a 1=valid bit is added
 * to the 53rd bit; since this is a wide register we then split it into
 * two 32-bit writes.
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

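/* Worked example (hypothetical address): for addr == 0x0000001234567000,
 * ONCHIP_ADDR1() yields 0x01234567 (bits 43:12) and ONCHIP_ADDR2() yields
 * 0x00100000 - bits 63:44 are zero here, plus the valid bit at position 20.
 */
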
#define CNIC_ILT_LINES		0

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	  (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now  1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6625
6626static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6627{
6628 int i;
6629
555f6c78 6630 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6631 struct bnx2x_fastpath *fp = &bp->fp[i];
6632
6633 u16 bd_cons = fp->tx_bd_cons;
6634 u16 sw_prod = fp->tx_pkt_prod;
6635 u16 sw_cons = fp->tx_pkt_cons;
6636
a2fbb9ea
ET
6637 while (sw_cons != sw_prod) {
6638 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6639 sw_cons++;
6640 }
6641 }
6642}
6643
6644static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6645{
6646 int i, j;
6647
555f6c78 6648 for_each_rx_queue(bp, j) {
6649 struct bnx2x_fastpath *fp = &bp->fp[j];
6650
6651 for (i = 0; i < NUM_RX_BD; i++) {
6652 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6653 struct sk_buff *skb = rx_buf->skb;
6654
6655 if (skb == NULL)
6656 continue;
6657
6658 pci_unmap_single(bp->pdev,
6659 pci_unmap_addr(rx_buf, mapping),
356e2385 6660 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6661
6662 rx_buf->skb = NULL;
6663 dev_kfree_skb(skb);
6664 }
7a9b2557 6665 if (!fp->disable_tpa)
6666 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6667 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6668 ETH_MAX_AGGREGATION_QUEUES_E1H);
6669 }
6670}
6671
6672static void bnx2x_free_skbs(struct bnx2x *bp)
6673{
6674 bnx2x_free_tx_skbs(bp);
6675 bnx2x_free_rx_skbs(bp);
6676}
6677
6678static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6679{
34f80b04 6680 int i, offset = 1;
6681
6682 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6683 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6684 bp->msix_table[0].vector);
6685
6686 for_each_queue(bp, i) {
c14423fe 6687 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6688 "state %x\n", i, bp->msix_table[i + offset].vector,
6689 bnx2x_fp(bp, i, state));
6690
34f80b04 6691 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6692 }
6693}
6694
6695static void bnx2x_free_irq(struct bnx2x *bp)
6696{
a2fbb9ea 6697 if (bp->flags & USING_MSIX_FLAG) {
6698 bnx2x_free_msix_irqs(bp);
6699 pci_disable_msix(bp->pdev);
6700 bp->flags &= ~USING_MSIX_FLAG;
6701
6702 } else if (bp->flags & USING_MSI_FLAG) {
6703 free_irq(bp->pdev->irq, bp->dev);
6704 pci_disable_msi(bp->pdev);
6705 bp->flags &= ~USING_MSI_FLAG;
6706
6707 } else
6708 free_irq(bp->pdev->irq, bp->dev);
6709}
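/* Editor's note: teardown mirrors the three interrupt modes used at load
 * time: MSI-X frees the slowpath vector plus one vector per fastpath and
 * then calls pci_disable_msix(); MSI frees the single IRQ and calls
 * pci_disable_msi(); legacy INTx just frees the shared IRQ line. */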
6710
6711static int bnx2x_enable_msix(struct bnx2x *bp)
6712{
6713 int i, rc, offset = 1;
6714 int igu_vec = 0;
a2fbb9ea 6715
6716 bp->msix_table[0].entry = igu_vec;
6717 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6718
34f80b04 6719 for_each_queue(bp, i) {
8badd27a 6720 igu_vec = BP_L_ID(bp) + offset + i;
6721 bp->msix_table[i + offset].entry = igu_vec;
6722 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6723 "(fastpath #%u)\n", i + offset, igu_vec, i);
6724 }
6725
34f80b04 6726 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6727 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6728 if (rc) {
6729 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6730 return rc;
34f80b04 6731 }
8badd27a 6732
6733 bp->flags |= USING_MSIX_FLAG;
6734
6735 return 0;
6736}
6737
6738static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6739{
34f80b04 6740 int i, rc, offset = 1;
a2fbb9ea 6741
6742 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6743 bp->dev->name, bp->dev);
6744 if (rc) {
6745 BNX2X_ERR("request sp irq failed\n");
6746 return -EBUSY;
6747 }
6748
6749 for_each_queue(bp, i) {
6750 struct bnx2x_fastpath *fp = &bp->fp[i];
6751
6752 if (i < bp->num_rx_queues)
6753 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6754 else
6755 sprintf(fp->name, "%s-tx-%d",
6756 bp->dev->name, i - bp->num_rx_queues);
6757
34f80b04 6758 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6759 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6760 if (rc) {
555f6c78 6761 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6762 bnx2x_free_msix_irqs(bp);
6763 return -EBUSY;
6764 }
6765
555f6c78 6766 fp->state = BNX2X_FP_STATE_IRQ;
6767 }
6768
555f6c78 6769 i = BNX2X_NUM_QUEUES(bp);
6770 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6771 " ... fp[%d] %d\n",
6772 bp->dev->name, bp->msix_table[0].vector,
6773 0, bp->msix_table[offset].vector,
6774 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 6775
a2fbb9ea 6776 return 0;
6777}
6778
6779static int bnx2x_enable_msi(struct bnx2x *bp)
6780{
6781 int rc;
6782
6783 rc = pci_enable_msi(bp->pdev);
6784 if (rc) {
6785 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6786 return -1;
6787 }
6788 bp->flags |= USING_MSI_FLAG;
6789
6790 return 0;
6791}
6792
6793static int bnx2x_req_irq(struct bnx2x *bp)
6794{
8badd27a 6795 unsigned long flags;
34f80b04 6796 int rc;
a2fbb9ea 6797
6798 if (bp->flags & USING_MSI_FLAG)
6799 flags = 0;
6800 else
6801 flags = IRQF_SHARED;
6802
6803 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6804 bp->dev->name, bp->dev);
6805 if (!rc)
6806 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6807
6808 return rc;
6809}
6810
6811static void bnx2x_napi_enable(struct bnx2x *bp)
6812{
6813 int i;
6814
555f6c78 6815 for_each_rx_queue(bp, i)
6816 napi_enable(&bnx2x_fp(bp, i, napi));
6817}
6818
6819static void bnx2x_napi_disable(struct bnx2x *bp)
6820{
6821 int i;
6822
555f6c78 6823 for_each_rx_queue(bp, i)
6824 napi_disable(&bnx2x_fp(bp, i, napi));
6825}
6826
6827static void bnx2x_netif_start(struct bnx2x *bp)
6828{
6829 int intr_sem;
6830
6831 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6832 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6833
6834 if (intr_sem) {
65abd74d 6835 if (netif_running(bp->dev)) {
6836 bnx2x_napi_enable(bp);
6837 bnx2x_int_enable(bp);
6838 if (bp->state == BNX2X_STATE_OPEN)
6839 netif_tx_wake_all_queues(bp->dev);
6840 }
6841 }
6842}
6843
f8ef6e44 6844static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6845{
f8ef6e44 6846 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6847 bnx2x_napi_disable(bp);
6848 netif_tx_disable(bp->dev);
6849 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6850}
6851
6852/*
6853 * Init service functions
6854 */
6855
3101c2bc 6856static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6857{
6858 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6859 int port = BP_PORT(bp);
6860
6861 /* CAM allocation
6862 * unicasts 0-31:port0 32-63:port1
6863 * multicast 64-127:port0 128-191:port1
6864 */
8d9c5f34 6865 config->hdr.length = 2;
af246401 6866 config->hdr.offset = port ? 32 : 0;
0626b899 6867 config->hdr.client_id = bp->fp->cl_id;
6868 config->hdr.reserved1 = 0;
6869
6870 /* primary MAC */
6871 config->config_table[0].cam_entry.msb_mac_addr =
6872 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6873 config->config_table[0].cam_entry.middle_mac_addr =
6874 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6875 config->config_table[0].cam_entry.lsb_mac_addr =
6876 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6877 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6878 if (set)
6879 config->config_table[0].target_table_entry.flags = 0;
6880 else
6881 CAM_INVALIDATE(config->config_table[0]);
6882 config->config_table[0].target_table_entry.clients_bit_vector =
6883 cpu_to_le32(1 << BP_L_ID(bp));
6884 config->config_table[0].target_table_entry.vlan_id = 0;
6885
6886 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6887 (set ? "setting" : "clearing"),
6888 config->config_table[0].cam_entry.msb_mac_addr,
6889 config->config_table[0].cam_entry.middle_mac_addr,
6890 config->config_table[0].cam_entry.lsb_mac_addr);
6891
6892 /* broadcast */
6893 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6894 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6895 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 6896 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6897 if (set)
6898 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6899 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6900 else
6901 CAM_INVALIDATE(config->config_table[1]);
6902 config->config_table[1].target_table_entry.clients_bit_vector =
6903 cpu_to_le32(1 << BP_L_ID(bp));
6904 config->config_table[1].target_table_entry.vlan_id = 0;
6905
6906 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6907 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6908 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6909}
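/* Editor's sketch: the CAM entries above store the 6-byte MAC address as
 * three 16-bit words via swab16(*(u16 *)&addr[n]). On a little-endian
 * host that packs each byte pair as (addr[n] << 8) | addr[n + 1]. A
 * standalone illustration (hypothetical helper name, #if 0 guarded):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint16_t demo_pack_mac_word(const uint8_t *p)
{
	/* equivalent of swab16() on a little-endian u16 load of p[0..1] */
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };

	printf("msb=%04x middle=%04x lsb=%04x\n",
	       demo_pack_mac_word(&mac[0]),
	       demo_pack_mac_word(&mac[2]),
	       demo_pack_mac_word(&mac[4]));
	/* prints: msb=0010 middle=18aa lsb=bbcc */
	return 0;
}
#endif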
6910
3101c2bc 6911static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6912{
6913 struct mac_configuration_cmd_e1h *config =
6914 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6915
6916 /* CAM allocation for E1H
6917 * unicasts: by func number
6918 * multicast: 20+FUNC*20, 20 each
6919 */
8d9c5f34 6920 config->hdr.length = 1;
34f80b04 6921 config->hdr.offset = BP_FUNC(bp);
0626b899 6922 config->hdr.client_id = bp->fp->cl_id;
6923 config->hdr.reserved1 = 0;
6924
6925 /* primary MAC */
6926 config->config_table[0].msb_mac_addr =
6927 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6928 config->config_table[0].middle_mac_addr =
6929 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6930 config->config_table[0].lsb_mac_addr =
6931 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6932 config->config_table[0].clients_bit_vector =
6933 cpu_to_le32(1 << BP_L_ID(bp));
6934 config->config_table[0].vlan_id = 0;
6935 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6936 if (set)
6937 config->config_table[0].flags = BP_PORT(bp);
6938 else
6939 config->config_table[0].flags =
6940 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6941
6942 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6943 (set ? "setting" : "clearing"),
6944 config->config_table[0].msb_mac_addr,
6945 config->config_table[0].middle_mac_addr,
6946 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6947
6948 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6949 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6950 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6951}
6952
6953static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6954 int *state_p, int poll)
6955{
6956 /* can take a while if any port is running */
8b3a0f0b 6957 int cnt = 5000;
a2fbb9ea 6958
6959 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6960 poll ? "polling" : "waiting", state, idx);
6961
6962 might_sleep();
34f80b04 6963 while (cnt--) {
6964 if (poll) {
6965 bnx2x_rx_int(bp->fp, 10);
6966 /* if index is different from 0
6967 * the reply for some commands will
3101c2bc 6968 * be on the non-default queue
6969 */
6970 if (idx)
6971 bnx2x_rx_int(&bp->fp[idx], 10);
6972 }
a2fbb9ea 6973
3101c2bc 6974 mb(); /* state is changed by bnx2x_sp_event() */
6975 if (*state_p == state) {
6976#ifdef BNX2X_STOP_ON_ERROR
6977 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6978#endif
a2fbb9ea 6979 return 0;
8b3a0f0b 6980 }
a2fbb9ea 6981
a2fbb9ea 6982 msleep(1);
6983 }
6984
a2fbb9ea 6985 /* timeout! */
6986 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6987 poll ? "polling" : "waiting", state, idx);
6988#ifdef BNX2X_STOP_ON_ERROR
6989 bnx2x_panic();
6990#endif
a2fbb9ea 6991
49d66772 6992 return -EBUSY;
6993}
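/* Editor's sketch: bnx2x_wait_ramrod() above is a bounded poll loop: up
 * to 5000 iterations, optionally servicing the Rx path so the ramrod
 * completion can be observed, sleeping 1 ms per miss and failing with
 * -EBUSY on timeout. Stripped of driver detail (hypothetical names,
 * illustration only):
 */
#if 0
static int demo_poll_for_state(volatile int *state_p, int wanted,
			       int budget_ms)
{
	while (budget_ms--) {
		if (*state_p == wanted)
			return 0;
		/* the driver calls msleep(1) here */
	}
	return -1;	/* timeout, analogous to -EBUSY above */
}
#endif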
6994
6995static int bnx2x_setup_leading(struct bnx2x *bp)
6996{
34f80b04 6997 int rc;
a2fbb9ea 6998
c14423fe 6999 /* reset IGU state */
34f80b04 7000 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7001
7002 /* SETUP ramrod */
7003 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7004
7005 /* Wait for completion */
7006 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7007
34f80b04 7008 return rc;
7009}
7010
7011static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7012{
7013 struct bnx2x_fastpath *fp = &bp->fp[index];
7014
a2fbb9ea 7015 /* reset IGU state */
555f6c78 7016 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7017
228241eb 7018 /* SETUP ramrod */
7019 fp->state = BNX2X_FP_STATE_OPENING;
7020 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7021 fp->cl_id, 0);
7022
7023 /* Wait for completion */
7024 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7025 &(fp->state), 0);
7026}
7027
a2fbb9ea 7028static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7029
7030static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7031 int *num_tx_queues_out)
7032{
7033 int _num_rx_queues = 0, _num_tx_queues = 0;
7034
7035 switch (bp->multi_mode) {
7036 case ETH_RSS_MODE_DISABLED:
7037 _num_rx_queues = 1;
7038 _num_tx_queues = 1;
7039 break;
7040
7041 case ETH_RSS_MODE_REGULAR:
7042 if (num_rx_queues)
7043 _num_rx_queues = min_t(u32, num_rx_queues,
7044 BNX2X_MAX_QUEUES(bp));
7045 else
7046 _num_rx_queues = min_t(u32, num_online_cpus(),
7047 BNX2X_MAX_QUEUES(bp));
7048
7049 if (num_tx_queues)
7050 _num_tx_queues = min_t(u32, num_tx_queues,
7051 BNX2X_MAX_QUEUES(bp));
7052 else
7053 _num_tx_queues = min_t(u32, num_online_cpus(),
7054 BNX2X_MAX_QUEUES(bp));
7055
7056 /* There must not be more Tx queues than Rx queues */
7057 if (_num_tx_queues > _num_rx_queues) {
7058 BNX2X_ERR("number of tx queues (%d) > "
7059 "number of rx queues (%d)"
7060 " defaulting to %d\n",
7061 _num_tx_queues, _num_rx_queues,
7062 _num_rx_queues);
7063 _num_tx_queues = _num_rx_queues;
7064 }
7065 break;
7066
7067
7068 default:
7069 _num_rx_queues = 1;
7070 _num_tx_queues = 1;
7071 break;
7072 }
7073
7074 *num_rx_queues_out = _num_rx_queues;
7075 *num_tx_queues_out = _num_tx_queues;
7076}
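/* Editor's example: with 8 online CPUs, num_rx_queues=0, num_tx_queues=12
 * and BNX2X_MAX_QUEUES(bp) == 16 (assumed figures), the code above picks
 * rx = min(8, 16) = 8 and tx = min(12, 16) = 12, then clamps tx down to 8
 * because Tx queues may not outnumber Rx queues. */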
7077
7078static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7079{
ca00392c 7080 int rc = 0;
a2fbb9ea 7081
7082 switch (int_mode) {
7083 case INT_MODE_INTx:
7084 case INT_MODE_MSI:
7085 bp->num_rx_queues = 1;
7086 bp->num_tx_queues = 1;
7087 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7088 break;
7089
7090 case INT_MODE_MSIX:
7091 default:
7092 /* Set interrupt mode according to bp->multi_mode value */
7093 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7094 &bp->num_tx_queues);
7095
7096 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7097 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7098
7099 /* if we can't use MSI-X we only need one fp,
7100 * so try to enable MSI-X with the requested number of fp's
7101 * and fall back to MSI or legacy INTx with one fp
7102 */
7103 rc = bnx2x_enable_msix(bp);
7104 if (rc) {
34f80b04 7105 /* failed to enable MSI-X */
7106 if (bp->multi_mode)
7107 BNX2X_ERR("Multi requested but failed to "
7108 "enable MSI-X (rx %d tx %d), "
7109 "set number of queues to 1\n",
7110 bp->num_rx_queues, bp->num_tx_queues);
7111 bp->num_rx_queues = 1;
7112 bp->num_tx_queues = 1;
a2fbb9ea 7113 }
8badd27a 7114 break;
a2fbb9ea 7115 }
555f6c78 7116 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7117 return rc;
7118}
7119
7120
7121/* must be called with rtnl_lock */
7122static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7123{
7124 u32 load_code;
7125 int i, rc;
7126
8badd27a 7127#ifdef BNX2X_STOP_ON_ERROR
7128 if (unlikely(bp->panic))
7129 return -EPERM;
7130#endif
7131
7132 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7133
ca00392c 7134 rc = bnx2x_set_int_mode(bp);
c14423fe 7135
7136 if (bnx2x_alloc_mem(bp))
7137 return -ENOMEM;
7138
555f6c78 7139 for_each_rx_queue(bp, i)
7140 bnx2x_fp(bp, i, disable_tpa) =
7141 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7142
555f6c78 7143 for_each_rx_queue(bp, i)
7144 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7145 bnx2x_poll, 128);
7146
7147 bnx2x_napi_enable(bp);
7148
7149 if (bp->flags & USING_MSIX_FLAG) {
7150 rc = bnx2x_req_msix_irqs(bp);
7151 if (rc) {
7152 pci_disable_msix(bp->pdev);
2dfe0e1f 7153 goto load_error1;
7154 }
7155 } else {
7156 /* Fall back to INTx if we failed to enable MSI-X due to lack of
7157 memory (in bnx2x_set_int_mode()) */
7158 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7159 bnx2x_enable_msi(bp);
7160 bnx2x_ack_int(bp);
7161 rc = bnx2x_req_irq(bp);
7162 if (rc) {
2dfe0e1f 7163 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7164 if (bp->flags & USING_MSI_FLAG)
7165 pci_disable_msi(bp->pdev);
2dfe0e1f 7166 goto load_error1;
a2fbb9ea 7167 }
7168 if (bp->flags & USING_MSI_FLAG) {
7169 bp->dev->irq = bp->pdev->irq;
7170 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7171 bp->dev->name, bp->pdev->irq);
7172 }
7173 }
7174
7175 /* Send LOAD_REQUEST command to MCP
7176 Returns the type of LOAD command:
7177 if it is the first port to be initialized
7178 common blocks should be initialized, otherwise - not
7179 */
7180 if (!BP_NOMCP(bp)) {
7181 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7182 if (!load_code) {
7183 BNX2X_ERR("MCP response failure, aborting\n");
7184 rc = -EBUSY;
7185 goto load_error2;
7186 }
7187 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7188 rc = -EBUSY; /* other port in diagnostic mode */
7189 goto load_error2;
7190 }
7191
7192 } else {
7193 int port = BP_PORT(bp);
7194
f5372251 7195 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7196 load_count[0], load_count[1], load_count[2]);
7197 load_count[0]++;
7198 load_count[1 + port]++;
f5372251 7199 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7200 load_count[0], load_count[1], load_count[2]);
7201 if (load_count[0] == 1)
7202 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7203 else if (load_count[1 + port] == 1)
7204 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7205 else
7206 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7207 }
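	/* Editor's note: without an MCP, load_count[0] tracks loads on the
	 * whole chip and load_count[1 + port] loads per port, so the first
	 * load on the chip gets LOAD_COMMON (init common blocks), the first
	 * on a port gets LOAD_PORT, and anything later LOAD_FUNCTION. */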
7208
7209 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7210 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7211 bp->port.pmf = 1;
7212 else
7213 bp->port.pmf = 0;
7214 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7215
a2fbb9ea 7216 /* Initialize HW */
7217 rc = bnx2x_init_hw(bp, load_code);
7218 if (rc) {
a2fbb9ea 7219 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7220 goto load_error2;
7221 }
7222
a2fbb9ea 7223 /* Setup NIC internals and enable interrupts */
471de716 7224 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7225
7226 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7227 (bp->common.shmem2_base))
7228 SHMEM2_WR(bp, dcc_support,
7229 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7230 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7231
a2fbb9ea 7232 /* Send LOAD_DONE command to MCP */
34f80b04 7233 if (!BP_NOMCP(bp)) {
7234 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7235 if (!load_code) {
da5a662a 7236 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7237 rc = -EBUSY;
2dfe0e1f 7238 goto load_error3;
7239 }
7240 }
7241
7242 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7243
7244 rc = bnx2x_setup_leading(bp);
7245 if (rc) {
da5a662a 7246 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 7247 goto load_error3;
34f80b04 7248 }
a2fbb9ea 7249
7250 if (CHIP_IS_E1H(bp))
7251 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7252 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7253 bp->state = BNX2X_STATE_DISABLED;
7254 }
a2fbb9ea 7255
ca00392c 7256 if (bp->state == BNX2X_STATE_OPEN) {
7257 for_each_nondefault_queue(bp, i) {
7258 rc = bnx2x_setup_multi(bp, i);
7259 if (rc)
2dfe0e1f 7260 goto load_error3;
34f80b04 7261 }
a2fbb9ea 7262
7263 if (CHIP_IS_E1(bp))
7264 bnx2x_set_mac_addr_e1(bp, 1);
7265 else
7266 bnx2x_set_mac_addr_e1h(bp, 1);
7267 }
7268
7269 if (bp->port.pmf)
b5bf9068 7270 bnx2x_initial_phy_init(bp, load_mode);
7271
7272 /* Start fast path */
7273 switch (load_mode) {
7274 case LOAD_NORMAL:
7275 if (bp->state == BNX2X_STATE_OPEN) {
7276 /* Tx queues should only be re-enabled */
7277 netif_tx_wake_all_queues(bp->dev);
7278 }
2dfe0e1f 7279 /* Initialize the receive filter. */
7280 bnx2x_set_rx_mode(bp->dev);
7281 break;
7282
7283 case LOAD_OPEN:
555f6c78 7284 netif_tx_start_all_queues(bp->dev);
7285 if (bp->state != BNX2X_STATE_OPEN)
7286 netif_tx_disable(bp->dev);
2dfe0e1f 7287 /* Initialize the receive filter. */
34f80b04 7288 bnx2x_set_rx_mode(bp->dev);
34f80b04 7289 break;
a2fbb9ea 7290
34f80b04 7291 case LOAD_DIAG:
2dfe0e1f 7292 /* Initialize the receive filter. */
a2fbb9ea 7293 bnx2x_set_rx_mode(bp->dev);
7294 bp->state = BNX2X_STATE_DIAG;
7295 break;
7296
7297 default:
7298 break;
7299 }
7300
7301 if (!bp->port.pmf)
7302 bnx2x__link_status_update(bp);
7303
7304 /* start the timer */
7305 mod_timer(&bp->timer, jiffies + bp->current_interval);
7306
34f80b04 7307
7308 return 0;
7309
7310load_error3:
7311 bnx2x_int_disable_sync(bp, 1);
7312 if (!BP_NOMCP(bp)) {
7313 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7314 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7315 }
7316 bp->port.pmf = 0;
7317 /* Free SKBs, SGEs, TPA pool and driver internals */
7318 bnx2x_free_skbs(bp);
555f6c78 7319 for_each_rx_queue(bp, i)
3196a88a 7320 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7321load_error2:
7322 /* Release IRQs */
7323 bnx2x_free_irq(bp);
7324load_error1:
7325 bnx2x_napi_disable(bp);
555f6c78 7326 for_each_rx_queue(bp, i)
7cde1c8b 7327 netif_napi_del(&bnx2x_fp(bp, i, napi));
7328 bnx2x_free_mem(bp);
7329
34f80b04 7330 return rc;
7331}
7332
7333static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7334{
555f6c78 7335 struct bnx2x_fastpath *fp = &bp->fp[index];
7336 int rc;
7337
c14423fe 7338 /* halt the connection */
7339 fp->state = BNX2X_FP_STATE_HALTING;
7340 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7341
34f80b04 7342 /* Wait for completion */
a2fbb9ea 7343 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7344 &(fp->state), 1);
c14423fe 7345 if (rc) /* timeout */
7346 return rc;
7347
7348 /* delete cfc entry */
7349 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7350
7351 /* Wait for completion */
7352 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7353 &(fp->state), 1);
34f80b04 7354 return rc;
7355}
7356
da5a662a 7357static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7358{
4781bfad 7359 __le16 dsb_sp_prod_idx;
c14423fe 7360 /* if the other port is handling traffic,
a2fbb9ea 7361 this can take a lot of time */
7362 int cnt = 500;
7363 int rc;
7364
7365 might_sleep();
7366
7367 /* Send HALT ramrod */
7368 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7369 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7370
7371 /* Wait for completion */
7372 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7373 &(bp->fp[0].state), 1);
7374 if (rc) /* timeout */
da5a662a 7375 return rc;
a2fbb9ea 7376
49d66772 7377 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7378
228241eb 7379 /* Send PORT_DELETE ramrod */
7380 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7381
49d66772 7382 /* Wait for completion to arrive on the default status block;
7383 we are going to reset the chip anyway,
7384 so there is not much to do if this times out
7385 */
34f80b04 7386 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7387 if (!cnt) {
7388 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7389 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7390 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7391#ifdef BNX2X_STOP_ON_ERROR
7392 bnx2x_panic();
7393#endif
36e552ab 7394 rc = -EBUSY;
7395 break;
7396 }
7397 cnt--;
da5a662a 7398 msleep(1);
5650d9d4 7399 rmb(); /* Refresh the dsb_sp_prod */
7400 }
7401 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7402 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7403
7404 return rc;
7405}
7406
7407static void bnx2x_reset_func(struct bnx2x *bp)
7408{
7409 int port = BP_PORT(bp);
7410 int func = BP_FUNC(bp);
7411 int base, i;
7412
7413 /* Configure IGU */
7414 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7415 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7416
7417 /* Clear ILT */
7418 base = FUNC_ILT_BASE(func);
7419 for (i = base; i < base + ILT_PER_FUNC; i++)
7420 bnx2x_ilt_wr(bp, i, 0);
7421}
7422
7423static void bnx2x_reset_port(struct bnx2x *bp)
7424{
7425 int port = BP_PORT(bp);
7426 u32 val;
7427
7428 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7429
7430 /* Do not rcv packets to BRB */
7431 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7432 /* Do not direct rcv packets that are not for MCP to the BRB */
7433 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7434 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7435
7436 /* Configure AEU */
7437 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7438
7439 msleep(100);
7440 /* Check for BRB port occupancy */
7441 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7442 if (val)
7443 DP(NETIF_MSG_IFDOWN,
33471629 7444 "BRB1 is not empty %d blocks are occupied\n", val);
7445
7446 /* TODO: Close Doorbell port? */
7447}
7448
7449static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7450{
7451 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7452 BP_FUNC(bp), reset_code);
7453
7454 switch (reset_code) {
7455 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7456 bnx2x_reset_port(bp);
7457 bnx2x_reset_func(bp);
7458 bnx2x_reset_common(bp);
7459 break;
7460
7461 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7462 bnx2x_reset_port(bp);
7463 bnx2x_reset_func(bp);
7464 break;
7465
7466 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7467 bnx2x_reset_func(bp);
7468 break;
49d66772 7469
7470 default:
7471 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7472 break;
7473 }
7474}
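/* Editor's note: the reset scope above widens with the unload code the
 * MCP returns: FUNCTION resets only this function, PORT additionally
 * resets the port, and COMMON (last driver on the chip) resets the port,
 * the function and the common blocks. */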
7475
33471629 7476/* must be called with rtnl_lock */
34f80b04 7477static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7478{
da5a662a 7479 int port = BP_PORT(bp);
a2fbb9ea 7480 u32 reset_code = 0;
da5a662a 7481 int i, cnt, rc;
7482
7483 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7484
7485 bp->rx_mode = BNX2X_RX_MODE_NONE;
7486 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7487
f8ef6e44 7488 bnx2x_netif_stop(bp, 1);
e94d8af3 7489
7490 del_timer_sync(&bp->timer);
7491 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7492 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7493 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7494
7495 /* Release IRQs */
7496 bnx2x_free_irq(bp);
7497
7498 /* Wait until tx fastpath tasks complete */
7499 for_each_tx_queue(bp, i) {
7500 struct bnx2x_fastpath *fp = &bp->fp[i];
7501
34f80b04 7502 cnt = 1000;
e8b5fc51 7503 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7504
7961f791 7505 bnx2x_tx_int(fp);
7506 if (!cnt) {
7507 BNX2X_ERR("timeout waiting for queue[%d]\n",
7508 i);
7509#ifdef BNX2X_STOP_ON_ERROR
7510 bnx2x_panic();
7511 return -EBUSY;
7512#else
7513 break;
7514#endif
7515 }
7516 cnt--;
da5a662a 7517 msleep(1);
34f80b04 7518 }
228241eb 7519 }
7520 /* Give HW time to discard old tx messages */
7521 msleep(1);
a2fbb9ea 7522
7523 if (CHIP_IS_E1(bp)) {
7524 struct mac_configuration_cmd *config =
7525 bnx2x_sp(bp, mcast_config);
7526
7527 bnx2x_set_mac_addr_e1(bp, 0);
7528
8d9c5f34 7529 for (i = 0; i < config->hdr.length; i++)
7530 CAM_INVALIDATE(config->config_table[i]);
7531
8d9c5f34 7532 config->hdr.length = i;
7533 if (CHIP_REV_IS_SLOW(bp))
7534 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7535 else
7536 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7537 config->hdr.client_id = bp->fp->cl_id;
7538 config->hdr.reserved1 = 0;
7539
7540 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7541 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7542 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7543
7544 } else { /* E1H */
65abd74d
YG
7545 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7546
7547 bnx2x_set_mac_addr_e1h(bp, 0);
7548
7549 for (i = 0; i < MC_HASH_SIZE; i++)
7550 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7551
7552 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7553 }
7554
7555 if (unload_mode == UNLOAD_NORMAL)
7556 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7557
7d0446c2 7558 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7559 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7560
7d0446c2 7561 else if (bp->wol) {
7562 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7563 u8 *mac_addr = bp->dev->dev_addr;
7564 u32 val;
7565 /* The mac address is written to entries 1-4 to
7566 preserve entry 0 which is used by the PMF */
7567 u8 entry = (BP_E1HVN(bp) + 1)*8;
7568
7569 val = (mac_addr[0] << 8) | mac_addr[1];
7570 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7571
7572 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7573 (mac_addr[4] << 8) | mac_addr[5];
7574 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7575
7576 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7577
7578 } else
7579 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7580
7581 /* Close multi and leading connections
7582 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7583 for_each_nondefault_queue(bp, i)
7584 if (bnx2x_stop_multi(bp, i))
228241eb 7585 goto unload_error;
a2fbb9ea 7586
7587 rc = bnx2x_stop_leading(bp);
7588 if (rc) {
34f80b04 7589 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7590#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7591 return -EBUSY;
7592#else
7593 goto unload_error;
34f80b04 7594#endif
7595 }
7596
7597unload_error:
34f80b04 7598 if (!BP_NOMCP(bp))
228241eb 7599 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7600 else {
f5372251 7601 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7602 load_count[0], load_count[1], load_count[2]);
7603 load_count[0]--;
da5a662a 7604 load_count[1 + port]--;
f5372251 7605 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7606 load_count[0], load_count[1], load_count[2]);
7607 if (load_count[0] == 0)
7608 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7609 else if (load_count[1 + port] == 0)
7610 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7611 else
7612 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7613 }
a2fbb9ea 7614
7615 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7616 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7617 bnx2x__link_reset(bp);
7618
7619 /* Reset the chip */
228241eb 7620 bnx2x_reset_chip(bp, reset_code);
7621
7622 /* Report UNLOAD_DONE to MCP */
34f80b04 7623 if (!BP_NOMCP(bp))
a2fbb9ea 7624 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7625
9a035440 7626 bp->port.pmf = 0;
a2fbb9ea 7627
7a9b2557 7628 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7629 bnx2x_free_skbs(bp);
555f6c78 7630 for_each_rx_queue(bp, i)
3196a88a 7631 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7632 for_each_rx_queue(bp, i)
7cde1c8b 7633 netif_napi_del(&bnx2x_fp(bp, i, napi));
7634 bnx2x_free_mem(bp);
7635
7636 bp->state = BNX2X_STATE_CLOSED;
228241eb 7637
7638 netif_carrier_off(bp->dev);
7639
7640 return 0;
7641}
7642
7643static void bnx2x_reset_task(struct work_struct *work)
7644{
7645 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7646
7647#ifdef BNX2X_STOP_ON_ERROR
7648 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7649 " so reset not done to allow debug dump,\n"
ad361c98 7650 " you will need to reboot when done\n");
7651 return;
7652#endif
7653
7654 rtnl_lock();
7655
7656 if (!netif_running(bp->dev))
7657 goto reset_task_exit;
7658
7659 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7660 bnx2x_nic_load(bp, LOAD_NORMAL);
7661
7662reset_task_exit:
7663 rtnl_unlock();
7664}
7665
7666/* end of nic load/unload */
7667
7668/* ethtool_ops */
7669
7670/*
7671 * Init service functions
7672 */
7673
7674static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7675{
7676 switch (func) {
7677 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7678 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7679 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7680 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7681 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7682 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7683 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7684 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7685 default:
7686 BNX2X_ERR("Unsupported function index: %d\n", func);
7687 return (u32)(-1);
7688 }
7689}
7690
7691static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7692{
7693 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7694
7695 /* Flush all outstanding writes */
7696 mmiowb();
7697
7698 /* Pretend to be function 0 */
7699 REG_WR(bp, reg, 0);
7700 /* Flush the GRC transaction (in the chip) */
7701 new_val = REG_RD(bp, reg);
7702 if (new_val != 0) {
7703 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7704 new_val);
7705 BUG();
7706 }
7707
7708 /* From now we are in the "like-E1" mode */
7709 bnx2x_int_disable(bp);
7710
7711 /* Flush all outstanding writes */
7712 mmiowb();
7713
7714 /* Restore the original function settings */
7715 REG_WR(bp, reg, orig_func);
7716 new_val = REG_RD(bp, reg);
7717 if (new_val != orig_func) {
7718 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7719 orig_func, new_val);
7720 BUG();
7721 }
7722}
7723
7724static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7725{
7726 if (CHIP_IS_E1H(bp))
7727 bnx2x_undi_int_disable_e1h(bp, func);
7728 else
7729 bnx2x_int_disable(bp);
7730}
7731
7732static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7733{
7734 u32 val;
7735
7736 /* Check if there is any driver already loaded */
7737 val = REG_RD(bp, MISC_REG_UNPREPARED);
7738 if (val == 0x1) {
7739 /* Check if it is the UNDI driver
7740 * UNDI driver initializes CID offset for normal bell to 0x7
7741 */
4a37fb66 7742 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7743 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7744 if (val == 0x7) {
7745 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7746 /* save our func */
34f80b04 7747 int func = BP_FUNC(bp);
7748 u32 swap_en;
7749 u32 swap_val;
34f80b04 7750
7751 /* clear the UNDI indication */
7752 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7753
7754 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7755
7756 /* try unload UNDI on port 0 */
7757 bp->func = 0;
7758 bp->fw_seq =
7759 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7760 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7761 reset_code = bnx2x_fw_command(bp, reset_code);
7762
7763 /* if UNDI is loaded on the other port */
7764 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7765
7766 /* send "DONE" for previous unload */
7767 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7768
7769 /* unload UNDI on port 1 */
34f80b04 7770 bp->func = 1;
7771 bp->fw_seq =
7772 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7773 DRV_MSG_SEQ_NUMBER_MASK);
7774 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7775
7776 bnx2x_fw_command(bp, reset_code);
7777 }
7778
7779 /* now it's safe to release the lock */
7780 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7781
f1ef27ef 7782 bnx2x_undi_int_disable(bp, func);
7783
7784 /* close input traffic and wait for it */
7785 /* Do not rcv packets to BRB */
7786 REG_WR(bp,
7787 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7788 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7789 /* Do not direct rcv packets that are not for MCP to
7790 * the BRB */
7791 REG_WR(bp,
7792 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7793 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7794 /* clear AEU */
7795 REG_WR(bp,
7796 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7797 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7798 msleep(10);
7799
7800 /* save NIG port swap info */
7801 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7802 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7803 /* reset device */
7804 REG_WR(bp,
7805 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7806 0xd3ffffff);
7807 REG_WR(bp,
7808 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7809 0x1403);
7810 /* take the NIG out of reset and restore swap values */
7811 REG_WR(bp,
7812 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7813 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7814 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7815 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7816
7817 /* send unload done to the MCP */
7818 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7819
7820 /* restore our func and fw_seq */
7821 bp->func = func;
7822 bp->fw_seq =
7823 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7824 DRV_MSG_SEQ_NUMBER_MASK);
7825
7826 } else
7827 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7828 }
7829}
7830
7831static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7832{
7833 u32 val, val2, val3, val4, id;
72ce58c3 7834 u16 pmc;
7835
7836 /* Get the chip revision id and number. */
7837 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7838 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7839 id = ((val & 0xffff) << 16);
7840 val = REG_RD(bp, MISC_REG_CHIP_REV);
7841 id |= ((val & 0xf) << 12);
7842 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7843 id |= ((val & 0xff) << 4);
5a40e08e 7844 val = REG_RD(bp, MISC_REG_BOND_ID);
7845 id |= (val & 0xf);
7846 bp->common.chip_id = id;
7847 bp->link_params.chip_id = bp->common.chip_id;
7848 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
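	/* Editor's example: for id 0x164e1014 the fields above decode as
	 * chip num 0x164e, rev 0x1, metal 0x01, bond_id 0x4 (figures are
	 * illustrative). */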
7849
7850 val = (REG_RD(bp, 0x2874) & 0x55);
7851 if ((bp->common.chip_id & 0x1) ||
7852 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7853 bp->flags |= ONE_PORT_FLAG;
7854 BNX2X_DEV_INFO("single port device\n");
7855 }
7856
7857 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7858 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7859 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7860 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7861 bp->common.flash_size, bp->common.flash_size);
7862
7863 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 7864 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 7865 bp->link_params.shmem_base = bp->common.shmem_base;
7866 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7867 bp->common.shmem_base, bp->common.shmem2_base);
7868
7869 if (!bp->common.shmem_base ||
7870 (bp->common.shmem_base < 0xA0000) ||
7871 (bp->common.shmem_base >= 0xC0000)) {
7872 BNX2X_DEV_INFO("MCP not active\n");
7873 bp->flags |= NO_MCP_FLAG;
7874 return;
7875 }
7876
7877 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7878 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7879 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7880 BNX2X_ERR("BAD MCP validity signature\n");
7881
7882 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7883 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7884
7885 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7886 SHARED_HW_CFG_LED_MODE_MASK) >>
7887 SHARED_HW_CFG_LED_MODE_SHIFT);
7888
7889 bp->link_params.feature_config_flags = 0;
7890 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7891 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7892 bp->link_params.feature_config_flags |=
7893 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7894 else
7895 bp->link_params.feature_config_flags &=
7896 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7897
7898 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7899 bp->common.bc_ver = val;
7900 BNX2X_DEV_INFO("bc_ver %X\n", val);
7901 if (val < BNX2X_BC_VER) {
7902 /* for now only warn;
7903 * later we might need to enforce this */
7904 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7905 " please upgrade BC\n", BNX2X_BC_VER, val);
7906 }
7907 bp->link_params.feature_config_flags |=
7908 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7909 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7910
7911 if (BP_E1HVN(bp) == 0) {
7912 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7913 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7914 } else {
7915 /* no WOL capability for E1HVN != 0 */
7916 bp->flags |= NO_WOL_FLAG;
7917 }
7918 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 7919 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7920
7921 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7922 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7923 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7924 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7925
7926 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7927 val, val2, val3, val4);
7928}
7929
7930static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7931 u32 switch_cfg)
a2fbb9ea 7932{
34f80b04 7933 int port = BP_PORT(bp);
7934 u32 ext_phy_type;
7935
7936 switch (switch_cfg) {
7937 case SWITCH_CFG_1G:
7938 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7939
7940 ext_phy_type =
7941 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7942 switch (ext_phy_type) {
7943 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7944 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7945 ext_phy_type);
7946
7947 bp->port.supported |= (SUPPORTED_10baseT_Half |
7948 SUPPORTED_10baseT_Full |
7949 SUPPORTED_100baseT_Half |
7950 SUPPORTED_100baseT_Full |
7951 SUPPORTED_1000baseT_Full |
7952 SUPPORTED_2500baseX_Full |
7953 SUPPORTED_TP |
7954 SUPPORTED_FIBRE |
7955 SUPPORTED_Autoneg |
7956 SUPPORTED_Pause |
7957 SUPPORTED_Asym_Pause);
7958 break;
7959
7960 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7961 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7962 ext_phy_type);
7963
7964 bp->port.supported |= (SUPPORTED_10baseT_Half |
7965 SUPPORTED_10baseT_Full |
7966 SUPPORTED_100baseT_Half |
7967 SUPPORTED_100baseT_Full |
7968 SUPPORTED_1000baseT_Full |
7969 SUPPORTED_TP |
7970 SUPPORTED_FIBRE |
7971 SUPPORTED_Autoneg |
7972 SUPPORTED_Pause |
7973 SUPPORTED_Asym_Pause);
7974 break;
7975
7976 default:
7977 BNX2X_ERR("NVRAM config error. "
7978 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7979 bp->link_params.ext_phy_config);
7980 return;
7981 }
7982
7983 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7984 port*0x10);
7985 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7986 break;
7987
7988 case SWITCH_CFG_10G:
7989 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7990
7991 ext_phy_type =
7992 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7993 switch (ext_phy_type) {
7994 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7995 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7996 ext_phy_type);
7997
7998 bp->port.supported |= (SUPPORTED_10baseT_Half |
7999 SUPPORTED_10baseT_Full |
8000 SUPPORTED_100baseT_Half |
8001 SUPPORTED_100baseT_Full |
8002 SUPPORTED_1000baseT_Full |
8003 SUPPORTED_2500baseX_Full |
8004 SUPPORTED_10000baseT_Full |
8005 SUPPORTED_TP |
8006 SUPPORTED_FIBRE |
8007 SUPPORTED_Autoneg |
8008 SUPPORTED_Pause |
8009 SUPPORTED_Asym_Pause);
8010 break;
8011
8012 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8013 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8014 ext_phy_type);
f1410647 8015
34f80b04 8016 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8017 SUPPORTED_1000baseT_Full |
34f80b04 8018 SUPPORTED_FIBRE |
589abe3a 8019 SUPPORTED_Autoneg |
8020 SUPPORTED_Pause |
8021 SUPPORTED_Asym_Pause);
8022 break;
8023
8024 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8025 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8026 ext_phy_type);
8027
34f80b04 8028 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8029 SUPPORTED_2500baseX_Full |
34f80b04 8030 SUPPORTED_1000baseT_Full |
8031 SUPPORTED_FIBRE |
8032 SUPPORTED_Autoneg |
8033 SUPPORTED_Pause |
8034 SUPPORTED_Asym_Pause);
8035 break;
8036
8037 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8038 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8039 ext_phy_type);
8040
8041 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8042 SUPPORTED_FIBRE |
8043 SUPPORTED_Pause |
8044 SUPPORTED_Asym_Pause);
8045 break;
8046
8047 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8048 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8049 ext_phy_type);
8050
8051 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8052 SUPPORTED_1000baseT_Full |
8053 SUPPORTED_FIBRE |
8054 SUPPORTED_Pause |
8055 SUPPORTED_Asym_Pause);
8056 break;
8057
8058 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8059 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8060 ext_phy_type);
8061
34f80b04 8062 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8063 SUPPORTED_1000baseT_Full |
34f80b04 8064 SUPPORTED_Autoneg |
589abe3a 8065 SUPPORTED_FIBRE |
8066 SUPPORTED_Pause |
8067 SUPPORTED_Asym_Pause);
8068 break;
8069
8070 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8071 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8072 ext_phy_type);
8073
8074 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8075 SUPPORTED_1000baseT_Full |
8076 SUPPORTED_Autoneg |
8077 SUPPORTED_FIBRE |
8078 SUPPORTED_Pause |
8079 SUPPORTED_Asym_Pause);
8080 break;
8081
8082 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8083 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8084 ext_phy_type);
8085
8086 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8087 SUPPORTED_TP |
8088 SUPPORTED_Autoneg |
8089 SUPPORTED_Pause |
8090 SUPPORTED_Asym_Pause);
8091 break;
8092
8093 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8094 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8095 ext_phy_type);
8096
8097 bp->port.supported |= (SUPPORTED_10baseT_Half |
8098 SUPPORTED_10baseT_Full |
8099 SUPPORTED_100baseT_Half |
8100 SUPPORTED_100baseT_Full |
8101 SUPPORTED_1000baseT_Full |
8102 SUPPORTED_10000baseT_Full |
8103 SUPPORTED_TP |
8104 SUPPORTED_Autoneg |
8105 SUPPORTED_Pause |
8106 SUPPORTED_Asym_Pause);
8107 break;
8108
8109 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8110 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8111 bp->link_params.ext_phy_config);
8112 break;
8113
8114 default:
8115 BNX2X_ERR("NVRAM config error. "
8116 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8117 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8118 return;
8119 }
8120
34f80b04
EG
8121 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8122 port*0x18);
8123 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8124
a2fbb9ea
ET
8125 break;
8126
8127 default:
8128 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8129 bp->port.link_config);
8130 return;
8131 }
34f80b04 8132 bp->link_params.phy_addr = bp->port.phy_addr;
8133
8134 /* mask what we support according to speed_cap_mask */
8135 if (!(bp->link_params.speed_cap_mask &
8136 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8137 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8138
8139 if (!(bp->link_params.speed_cap_mask &
8140 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8141 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8142
8143 if (!(bp->link_params.speed_cap_mask &
8144 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8145 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8146
8147 if (!(bp->link_params.speed_cap_mask &
8148 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8149 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8150
8151 if (!(bp->link_params.speed_cap_mask &
8152 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8153 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8154 SUPPORTED_1000baseT_Full);
a2fbb9ea 8155
8156 if (!(bp->link_params.speed_cap_mask &
8157 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8158 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8159
8160 if (!(bp->link_params.speed_cap_mask &
8161 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8162 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8163
34f80b04 8164 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8165}
8166
34f80b04 8167static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8168{
c18487ee 8169 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8170
34f80b04 8171 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8172 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8173 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8174 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8175 bp->port.advertising = bp->port.supported;
a2fbb9ea 8176 } else {
8177 u32 ext_phy_type =
8178 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8179
8180 if ((ext_phy_type ==
8181 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8182 (ext_phy_type ==
8183 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8184 /* force 10G, no AN */
c18487ee 8185 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8186 bp->port.advertising =
8187 (ADVERTISED_10000baseT_Full |
8188 ADVERTISED_FIBRE);
8189 break;
8190 }
8191 BNX2X_ERR("NVRAM config error. "
8192 "Invalid link_config 0x%x"
8193 " Autoneg not supported\n",
34f80b04 8194 bp->port.link_config);
8195 return;
8196 }
8197 break;
8198
8199 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8200 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8201 bp->link_params.req_line_speed = SPEED_10;
8202 bp->port.advertising = (ADVERTISED_10baseT_Full |
8203 ADVERTISED_TP);
8204 } else {
8205 BNX2X_ERR("NVRAM config error. "
8206 "Invalid link_config 0x%x"
8207 " speed_cap_mask 0x%x\n",
34f80b04 8208 bp->port.link_config,
c18487ee 8209 bp->link_params.speed_cap_mask);
8210 return;
8211 }
8212 break;
8213
8214 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8215 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8216 bp->link_params.req_line_speed = SPEED_10;
8217 bp->link_params.req_duplex = DUPLEX_HALF;
8218 bp->port.advertising = (ADVERTISED_10baseT_Half |
8219 ADVERTISED_TP);
8220 } else {
8221 BNX2X_ERR("NVRAM config error. "
8222 "Invalid link_config 0x%x"
8223 " speed_cap_mask 0x%x\n",
34f80b04 8224 bp->port.link_config,
c18487ee 8225 bp->link_params.speed_cap_mask);
8226 return;
8227 }
8228 break;
8229
8230 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8231 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8232 bp->link_params.req_line_speed = SPEED_100;
8233 bp->port.advertising = (ADVERTISED_100baseT_Full |
8234 ADVERTISED_TP);
8235 } else {
8236 BNX2X_ERR("NVRAM config error. "
8237 "Invalid link_config 0x%x"
8238 " speed_cap_mask 0x%x\n",
34f80b04 8239 bp->port.link_config,
c18487ee 8240 bp->link_params.speed_cap_mask);
8241 return;
8242 }
8243 break;
8244
8245 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8246 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8247 bp->link_params.req_line_speed = SPEED_100;
8248 bp->link_params.req_duplex = DUPLEX_HALF;
8249 bp->port.advertising = (ADVERTISED_100baseT_Half |
8250 ADVERTISED_TP);
8251 } else {
8252 BNX2X_ERR("NVRAM config error. "
8253 "Invalid link_config 0x%x"
8254 " speed_cap_mask 0x%x\n",
34f80b04 8255 bp->port.link_config,
c18487ee 8256 bp->link_params.speed_cap_mask);
8257 return;
8258 }
8259 break;
8260
8261 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8262 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8263 bp->link_params.req_line_speed = SPEED_1000;
8264 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8265 ADVERTISED_TP);
8266 } else {
8267 BNX2X_ERR("NVRAM config error. "
8268 "Invalid link_config 0x%x"
8269 " speed_cap_mask 0x%x\n",
34f80b04 8270 bp->port.link_config,
c18487ee 8271 bp->link_params.speed_cap_mask);
8272 return;
8273 }
8274 break;
8275
8276 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8277 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8278 bp->link_params.req_line_speed = SPEED_2500;
8279 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8280 ADVERTISED_TP);
8281 } else {
8282 BNX2X_ERR("NVRAM config error. "
8283 "Invalid link_config 0x%x"
8284 " speed_cap_mask 0x%x\n",
34f80b04 8285 bp->port.link_config,
c18487ee 8286 bp->link_params.speed_cap_mask);
8287 return;
8288 }
8289 break;
8290
8291 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8292 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8293 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8294 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8295 bp->link_params.req_line_speed = SPEED_10000;
8296 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8297 ADVERTISED_FIBRE);
8298 } else {
8299 BNX2X_ERR("NVRAM config error. "
8300 "Invalid link_config 0x%x"
8301 " speed_cap_mask 0x%x\n",
34f80b04 8302 bp->port.link_config,
c18487ee 8303 bp->link_params.speed_cap_mask);
8304 return;
8305 }
8306 break;
8307
8308 default:
8309 BNX2X_ERR("NVRAM config error. "
8310 "BAD link speed link_config 0x%x\n",
34f80b04 8311 bp->port.link_config);
c18487ee 8312 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8313 bp->port.advertising = bp->port.supported;
8314 break;
8315 }
a2fbb9ea 8316
8317 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8318 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8319 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8320 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8321 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8322
c18487ee 8323 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8324 " advertising 0x%x\n",
8325 bp->link_params.req_line_speed,
8326 bp->link_params.req_duplex,
34f80b04 8327 bp->link_params.req_flow_ctrl, bp->port.advertising);
8328}
8329
34f80b04 8330static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8331{
8332 int port = BP_PORT(bp);
8333 u32 val, val2;
589abe3a 8334 u32 config;
c2c8b03e 8335 u16 i;
a2fbb9ea 8336
c18487ee 8337 bp->link_params.bp = bp;
34f80b04 8338 bp->link_params.port = port;
c18487ee 8339
c18487ee 8340 bp->link_params.lane_config =
a2fbb9ea 8341 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8342 bp->link_params.ext_phy_config =
8343 SHMEM_RD(bp,
8344 dev_info.port_hw_config[port].external_phy_config);
8345 /* BCM8727_NOC => BCM8727 no over current */
8346 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8347 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8348 bp->link_params.ext_phy_config &=
8349 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8350 bp->link_params.ext_phy_config |=
8351 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8352 bp->link_params.feature_config_flags |=
8353 FEATURE_CONFIG_BCM8727_NOC;
8354 }
8355
c18487ee 8356 bp->link_params.speed_cap_mask =
8357 SHMEM_RD(bp,
8358 dev_info.port_hw_config[port].speed_capability_mask);
8359
34f80b04 8360 bp->port.link_config =
8361 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8362
8363 /* Get the 4 lanes xgxs config rx and tx */
8364 for (i = 0; i < 2; i++) {
8365 val = SHMEM_RD(bp,
8366 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8367 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8368 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8369
8370 val = SHMEM_RD(bp,
8371 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8372 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8373 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8374 }
8375
3ce2c3f9
EG
8376 /* If the device is capable of WoL, set the default state according
8377 * to the HW
8378 */
4d295db0 8379 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8380 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8381 (config & PORT_FEATURE_WOL_ENABLED));
8382
c2c8b03e
EG
8383 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8384 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8385 bp->link_params.lane_config,
8386 bp->link_params.ext_phy_config,
34f80b04 8387 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8388
4d295db0
EG
8389 bp->link_params.switch_cfg |= (bp->port.link_config &
8390 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8391 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8392
8393 bnx2x_link_settings_requested(bp);
8394
8395 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8396 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8397 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8398 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8399 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8400 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8401 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8402 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8403 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8404 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8405}
8406
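/*
 * Worked example (illustrative, not part of the driver): the station
 * address sits in shmem as two 32-bit words, mac_upper holding bytes
 * 0-1 in its low 16 bits and mac_lower holding bytes 2-5. For a
 * hypothetical 00:10:18:aa:bb:cc:
 *
 *	val2 = 0x00000010              mac_upper
 *	val  = 0x18aabbcc              mac_lower
 *	dev_addr[0] = val2 >> 8        0x00
 *	dev_addr[1] = val2 & 0xff      0x10
 *	dev_addr[2] = val >> 24        0x18
 *	...
 *	dev_addr[5] = val & 0xff       0xcc
 */
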
8407static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8408{
8409 int func = BP_FUNC(bp);
8410 u32 val, val2;
8411 int rc = 0;
a2fbb9ea 8412
34f80b04 8413 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8414
34f80b04
EG
8415 bp->e1hov = 0;
8416 bp->e1hmf = 0;
8417 if (CHIP_IS_E1H(bp)) {
8418 bp->mf_config =
8419 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8420
2691d51d 8421 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8422 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8423 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8424 bp->e1hmf = 1;
2691d51d
EG
8425 BNX2X_DEV_INFO("%s function mode\n",
8426 IS_E1HMF(bp) ? "multi" : "single");
8427
8428 if (IS_E1HMF(bp)) {
8429 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8430 e1hov_tag) &
8431 FUNC_MF_CFG_E1HOV_TAG_MASK);
8432 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8433 bp->e1hov = val;
8434 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8435 "(0x%04x)\n",
8436 func, bp->e1hov, bp->e1hov);
8437 } else {
34f80b04
EG
8438 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8439 " aborting\n", func);
8440 rc = -EPERM;
8441 }
2691d51d
EG
8442 } else {
8443 if (BP_E1HVN(bp)) {
8444 BNX2X_ERR("!!! VN %d in single function mode,"
8445 " aborting\n", BP_E1HVN(bp));
8446 rc = -EPERM;
8447 }
34f80b04
EG
8448 }
8449 }
a2fbb9ea 8450
34f80b04
EG
8451 if (!BP_NOMCP(bp)) {
8452 bnx2x_get_port_hwinfo(bp);
8453
8454 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8455 DRV_MSG_SEQ_NUMBER_MASK);
8456 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8457 }
8458
8459 if (IS_E1HMF(bp)) {
8460 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8461 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8462 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8463 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8464 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8465 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8466 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8467 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8468 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8469 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8470 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8471 ETH_ALEN);
8472 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8473 ETH_ALEN);
a2fbb9ea 8474 }
34f80b04
EG
8475
8476 return rc;
a2fbb9ea
ET
8477 }
8478
34f80b04
EG
8479 if (BP_NOMCP(bp)) {
8480 /* only supposed to happen on emulation/FPGA */
33471629 8481 BNX2X_ERR("warning: random MAC workaround active\n");
34f80b04
EG
8482 random_ether_addr(bp->dev->dev_addr);
8483 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8484 }
a2fbb9ea 8485
34f80b04
EG
8486 return rc;
8487}
8488
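/*
 * Illustrative sketch (not part of the driver): multi-function (MF)
 * detection above keys off function 0's outer-VLAN tag. If FUNC_0's
 * e1hov_tag differs from FUNC_MF_CFG_E1HOV_TAG_DEFAULT the chip is in
 * MF mode, and every function must then carry a valid tag of its own:
 *
 *	if (func0_tag != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 *		if (my_tag == FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
 *			return -EPERM;     (misconfigured NVRAM)
 *		bp->e1hov = my_tag;
 *	}
 */
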
8489static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8490{
8491 int func = BP_FUNC(bp);
87942b46 8492 int timer_interval;
34f80b04
EG
8493 int rc;
8494
da5a662a
VZ
8495 /* Disable interrupt handling until HW is initialized */
8496 atomic_set(&bp->intr_sem, 1);
e1510706 8497 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8498
34f80b04 8499 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8500
1cf167f2 8501 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8502 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8503
8504 rc = bnx2x_get_hwinfo(bp);
8505
8506 /* need to reset the chip if UNDI was active */
8507 if (!BP_NOMCP(bp))
8508 bnx2x_undi_unload(bp);
8509
8510 if (CHIP_REV_IS_FPGA(bp))
8511 printk(KERN_ERR PFX "FPGA detected\n");
8512
8513 if (BP_NOMCP(bp) && (func == 0))
8514 printk(KERN_ERR PFX
8515 "MCP disabled, must load devices in order!\n");
8516
555f6c78 8517 /* Set multi queue mode */
8badd27a
EG
8518 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8519 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8520 printk(KERN_ERR PFX
8badd27a 8521 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8522 multi_mode = ETH_RSS_MODE_DISABLED;
8523 }
8524 bp->multi_mode = multi_mode;
8525
8526
7a9b2557
VZ
8527 /* Set TPA flags */
8528 if (disable_tpa) {
8529 bp->flags &= ~TPA_ENABLE_FLAG;
8530 bp->dev->features &= ~NETIF_F_LRO;
8531 } else {
8532 bp->flags |= TPA_ENABLE_FLAG;
8533 bp->dev->features |= NETIF_F_LRO;
8534 }
8535
8d5726c4 8536 bp->mrrs = mrrs;
7a9b2557 8537
34f80b04
EG
8538 bp->tx_ring_size = MAX_TX_AVAIL;
8539 bp->rx_ring_size = MAX_RX_AVAIL;
8540
8541 bp->rx_csum = 1;
34f80b04
EG
8542
8543 bp->tx_ticks = 50;
8544 bp->rx_ticks = 25;
8545
87942b46
EG
8546 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8547 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8548
8549 init_timer(&bp->timer);
8550 bp->timer.expires = jiffies + bp->current_interval;
8551 bp->timer.data = (unsigned long) bp;
8552 bp->timer.function = bnx2x_timer;
8553
8554 return rc;
a2fbb9ea
ET
8555}
8556
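/*
 * Illustrative sketch (not part of the driver): bp->intr_sem is set
 * to 1 above so that interrupts arriving before the HW is fully
 * initialized are ignored; conceptually the ISR side gates on it:
 *
 *	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 *		return IRQ_HANDLED;        (HW not ready yet)
 */
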
8557/*
8558 * ethtool service functions
8559 */
8560
8561/* All ethtool functions called with rtnl_lock */
8562
8563static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8564{
8565 struct bnx2x *bp = netdev_priv(dev);
8566
34f80b04
EG
8567 cmd->supported = bp->port.supported;
8568 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8569
8570 if (netif_carrier_ok(dev)) {
c18487ee
YR
8571 cmd->speed = bp->link_vars.line_speed;
8572 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8573 } else {
c18487ee
YR
8574 cmd->speed = bp->link_params.req_line_speed;
8575 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8576 }
34f80b04
EG
8577 if (IS_E1HMF(bp)) {
8578 u16 vn_max_rate;
8579
8580 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8581 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8582 if (vn_max_rate < cmd->speed)
8583 cmd->speed = vn_max_rate;
8584 }
a2fbb9ea 8585
c18487ee
YR
8586 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8587 u32 ext_phy_type =
8588 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8589
8590 switch (ext_phy_type) {
8591 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8592 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8593 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8594 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8595 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8596 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 8597 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
8598 cmd->port = PORT_FIBRE;
8599 break;
8600
8601 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8602 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8603 cmd->port = PORT_TP;
8604 break;
8605
c18487ee
YR
8606 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8607 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8608 bp->link_params.ext_phy_config);
8609 break;
8610
f1410647
ET
8611 default:
8612 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8613 bp->link_params.ext_phy_config);
8614 break;
f1410647
ET
8615 }
8616 } else
a2fbb9ea 8617 cmd->port = PORT_TP;
a2fbb9ea 8618
34f80b04 8619 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8620 cmd->transceiver = XCVR_INTERNAL;
8621
c18487ee 8622 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8623 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8624 else
a2fbb9ea 8625 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8626
8627 cmd->maxtxpkt = 0;
8628 cmd->maxrxpkt = 0;
8629
8630 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8631 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8632 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8633 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8634 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8635 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8636 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8637
8638 return 0;
8639}
8640
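/*
 * Worked example (illustrative, not part of the driver): in E1H
 * multi-function mode the reported speed is capped by the
 * per-function maximum bandwidth, stored in units of 100 Mbps. For a
 * hypothetical mf_config bandwidth field of 25:
 *
 *	vn_max_rate = 25 * 100 = 2500
 *	if (vn_max_rate < cmd->speed)      line speed e.g. 10000
 *		cmd->speed = vn_max_rate;  ethtool reports 2500
 */
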
8641static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8642{
8643 struct bnx2x *bp = netdev_priv(dev);
8644 u32 advertising;
8645
34f80b04
EG
8646 if (IS_E1HMF(bp))
8647 return 0;
8648
a2fbb9ea
ET
8649 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8650 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8651 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8652 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8653 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8654 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8655 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8656
a2fbb9ea 8657 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8658 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8659 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8660 return -EINVAL;
f1410647 8661 }
a2fbb9ea
ET
8662
8663 /* advertise the requested speed and duplex if supported */
34f80b04 8664 cmd->advertising &= bp->port.supported;
a2fbb9ea 8665
c18487ee
YR
8666 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8667 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8668 bp->port.advertising |= (ADVERTISED_Autoneg |
8669 cmd->advertising);
a2fbb9ea
ET
8670
8671 } else { /* forced speed */
8672 /* advertise the requested speed and duplex if supported */
8673 switch (cmd->speed) {
8674 case SPEED_10:
8675 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8676 if (!(bp->port.supported &
f1410647
ET
8677 SUPPORTED_10baseT_Full)) {
8678 DP(NETIF_MSG_LINK,
8679 "10M full not supported\n");
a2fbb9ea 8680 return -EINVAL;
f1410647 8681 }
a2fbb9ea
ET
8682
8683 advertising = (ADVERTISED_10baseT_Full |
8684 ADVERTISED_TP);
8685 } else {
34f80b04 8686 if (!(bp->port.supported &
f1410647
ET
8687 SUPPORTED_10baseT_Half)) {
8688 DP(NETIF_MSG_LINK,
8689 "10M half not supported\n");
a2fbb9ea 8690 return -EINVAL;
f1410647 8691 }
a2fbb9ea
ET
8692
8693 advertising = (ADVERTISED_10baseT_Half |
8694 ADVERTISED_TP);
8695 }
8696 break;
8697
8698 case SPEED_100:
8699 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8700 if (!(bp->port.supported &
f1410647
ET
8701 SUPPORTED_100baseT_Full)) {
8702 DP(NETIF_MSG_LINK,
8703 "100M full not supported\n");
a2fbb9ea 8704 return -EINVAL;
f1410647 8705 }
a2fbb9ea
ET
8706
8707 advertising = (ADVERTISED_100baseT_Full |
8708 ADVERTISED_TP);
8709 } else {
34f80b04 8710 if (!(bp->port.supported &
f1410647
ET
8711 SUPPORTED_100baseT_Half)) {
8712 DP(NETIF_MSG_LINK,
8713 "100M half not supported\n");
a2fbb9ea 8714 return -EINVAL;
f1410647 8715 }
a2fbb9ea
ET
8716
8717 advertising = (ADVERTISED_100baseT_Half |
8718 ADVERTISED_TP);
8719 }
8720 break;
8721
8722 case SPEED_1000:
f1410647
ET
8723 if (cmd->duplex != DUPLEX_FULL) {
8724 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8725 return -EINVAL;
f1410647 8726 }
a2fbb9ea 8727
34f80b04 8728 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8729 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8730 return -EINVAL;
f1410647 8731 }
a2fbb9ea
ET
8732
8733 advertising = (ADVERTISED_1000baseT_Full |
8734 ADVERTISED_TP);
8735 break;
8736
8737 case SPEED_2500:
f1410647
ET
8738 if (cmd->duplex != DUPLEX_FULL) {
8739 DP(NETIF_MSG_LINK,
8740 "2.5G half not supported\n");
a2fbb9ea 8741 return -EINVAL;
f1410647 8742 }
a2fbb9ea 8743
34f80b04 8744 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8745 DP(NETIF_MSG_LINK,
8746 "2.5G full not supported\n");
a2fbb9ea 8747 return -EINVAL;
f1410647 8748 }
a2fbb9ea 8749
f1410647 8750 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8751 ADVERTISED_TP);
8752 break;
8753
8754 case SPEED_10000:
f1410647
ET
8755 if (cmd->duplex != DUPLEX_FULL) {
8756 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8757 return -EINVAL;
f1410647 8758 }
a2fbb9ea 8759
34f80b04 8760 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8761 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8762 return -EINVAL;
f1410647 8763 }
a2fbb9ea
ET
8764
8765 advertising = (ADVERTISED_10000baseT_Full |
8766 ADVERTISED_FIBRE);
8767 break;
8768
8769 default:
f1410647 8770 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8771 return -EINVAL;
8772 }
8773
c18487ee
YR
8774 bp->link_params.req_line_speed = cmd->speed;
8775 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8776 bp->port.advertising = advertising;
a2fbb9ea
ET
8777 }
8778
c18487ee 8779 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8780 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8781 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8782 bp->port.advertising);
a2fbb9ea 8783
34f80b04 8784 if (netif_running(dev)) {
bb2a0f7a 8785 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8786 bnx2x_link_set(bp);
8787 }
a2fbb9ea
ET
8788
8789 return 0;
8790}
8791
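/*
 * Illustrative sketch (not part of the driver): the forced-speed
 * branch above only accepts a speed/duplex pair whose SUPPORTED_*
 * bit is present, so (device name hypothetical):
 *
 *	ethtool -s eth0 speed 1000 duplex half autoneg off
 *		always fails with -EINVAL (1G half unsupported)
 *	ethtool -s eth0 speed 1000 duplex full autoneg off
 *		succeeds only if bp->port.supported has
 *		SUPPORTED_1000baseT_Full
 */
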
c18487ee
YR
8792#define PHY_FW_VER_LEN 10
8793
a2fbb9ea
ET
8794static void bnx2x_get_drvinfo(struct net_device *dev,
8795 struct ethtool_drvinfo *info)
8796{
8797 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8798 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8799
8800 strcpy(info->driver, DRV_MODULE_NAME);
8801 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8802
8803 phy_fw_ver[0] = '\0';
34f80b04 8804 if (bp->port.pmf) {
4a37fb66 8805 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8806 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8807 (bp->state != BNX2X_STATE_CLOSED),
8808 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8809 bnx2x_release_phy_lock(bp);
34f80b04 8810 }
c18487ee 8811
f0e53a84
EG
8812 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8813 (bp->common.bc_ver & 0xff0000) >> 16,
8814 (bp->common.bc_ver & 0xff00) >> 8,
8815 (bp->common.bc_ver & 0xff),
8816 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8817 strcpy(info->bus_info, pci_name(bp->pdev));
8818 info->n_stats = BNX2X_NUM_STATS;
8819 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8820 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8821 info->regdump_len = 0;
8822}
8823
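/*
 * Worked example (illustrative, not part of the driver): bc_ver packs
 * one version component per byte, so a hypothetical bc_ver of 0x040200
 * is reported as "BC:4.2.0", with " PHY:<ver>" appended when the PMF
 * could read the external PHY firmware version:
 *
 *	(0x040200 & 0xff0000) >> 16 = 4
 *	(0x040200 & 0x00ff00) >>  8 = 2
 *	 0x040200 & 0x0000ff        = 0
 */
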
0a64ea57
EG
8824#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8825#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8826
8827static int bnx2x_get_regs_len(struct net_device *dev)
8828{
8829 static u32 regdump_len;
8830 struct bnx2x *bp = netdev_priv(dev);
8831 int i;
8832
8833 if (regdump_len)
8834 return regdump_len;
8835
8836 if (CHIP_IS_E1(bp)) {
8837 for (i = 0; i < REGS_COUNT; i++)
8838 if (IS_E1_ONLINE(reg_addrs[i].info))
8839 regdump_len += reg_addrs[i].size;
8840
8841 for (i = 0; i < WREGS_COUNT_E1; i++)
8842 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8843 regdump_len += wreg_addrs_e1[i].size *
8844 (1 + wreg_addrs_e1[i].read_regs_count);
8845
8846 } else { /* E1H */
8847 for (i = 0; i < REGS_COUNT; i++)
8848 if (IS_E1H_ONLINE(reg_addrs[i].info))
8849 regdump_len += reg_addrs[i].size;
8850
8851 for (i = 0; i < WREGS_COUNT_E1H; i++)
8852 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8853 regdump_len += wreg_addrs_e1h[i].size *
8854 (1 + wreg_addrs_e1h[i].read_regs_count);
8855 }
8856 regdump_len *= 4;
8857 regdump_len += sizeof(struct dump_hdr);
8858
8859 return regdump_len;
8860}
8861
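/*
 * Illustrative sketch (not part of the driver): the length is summed
 * in dwords and converted to bytes at the end. Plain blocks
 * contribute their size; wide-bus blocks are read indirectly, so each
 * of their entries also drags in read_regs_count companion reads:
 *
 *	len  = sum(reg.size)                          plain blocks
 *	len += sum(wreg.size * (1 + wreg.read_regs_count))
 *	len  = len * 4 + sizeof(struct dump_hdr)      bytes + header
 *
 * The static regdump_len cache computes this once, which assumes all
 * bnx2x devices in the system are of the same chip family.
 */
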
8862static void bnx2x_get_regs(struct net_device *dev,
8863 struct ethtool_regs *regs, void *_p)
8864{
8865 u32 *p = _p, i, j;
8866 struct bnx2x *bp = netdev_priv(dev);
8867 struct dump_hdr dump_hdr = {0};
8868
8869 regs->version = 0;
8870 memset(p, 0, regs->len);
8871
8872 if (!netif_running(bp->dev))
8873 return;
8874
8875 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8876 dump_hdr.dump_sign = dump_sign_all;
8877 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8878 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8879 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8880 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8881 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8882
8883 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8884 p += dump_hdr.hdr_size + 1;
8885
8886 if (CHIP_IS_E1(bp)) {
8887 for (i = 0; i < REGS_COUNT; i++)
8888 if (IS_E1_ONLINE(reg_addrs[i].info))
8889 for (j = 0; j < reg_addrs[i].size; j++)
8890 *p++ = REG_RD(bp,
8891 reg_addrs[i].addr + j*4);
8892
8893 } else { /* E1H */
8894 for (i = 0; i < REGS_COUNT; i++)
8895 if (IS_E1H_ONLINE(reg_addrs[i].info))
8896 for (j = 0; j < reg_addrs[i].size; j++)
8897 *p++ = REG_RD(bp,
8898 reg_addrs[i].addr + j*4);
8899 }
8900}
8901
a2fbb9ea
ET
8902static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8903{
8904 struct bnx2x *bp = netdev_priv(dev);
8905
8906 if (bp->flags & NO_WOL_FLAG) {
8907 wol->supported = 0;
8908 wol->wolopts = 0;
8909 } else {
8910 wol->supported = WAKE_MAGIC;
8911 if (bp->wol)
8912 wol->wolopts = WAKE_MAGIC;
8913 else
8914 wol->wolopts = 0;
8915 }
8916 memset(&wol->sopass, 0, sizeof(wol->sopass));
8917}
8918
8919static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8920{
8921 struct bnx2x *bp = netdev_priv(dev);
8922
8923 if (wol->wolopts & ~WAKE_MAGIC)
8924 return -EINVAL;
8925
8926 if (wol->wolopts & WAKE_MAGIC) {
8927 if (bp->flags & NO_WOL_FLAG)
8928 return -EINVAL;
8929
8930 bp->wol = 1;
34f80b04 8931 } else
a2fbb9ea 8932 bp->wol = 0;
34f80b04 8933
a2fbb9ea
ET
8934 return 0;
8935}
8936
8937static u32 bnx2x_get_msglevel(struct net_device *dev)
8938{
8939 struct bnx2x *bp = netdev_priv(dev);
8940
8941 return bp->msglevel;
8942}
8943
8944static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8945{
8946 struct bnx2x *bp = netdev_priv(dev);
8947
8948 if (capable(CAP_NET_ADMIN))
8949 bp->msglevel = level;
8950}
8951
8952static int bnx2x_nway_reset(struct net_device *dev)
8953{
8954 struct bnx2x *bp = netdev_priv(dev);
8955
34f80b04
EG
8956 if (!bp->port.pmf)
8957 return 0;
a2fbb9ea 8958
34f80b04 8959 if (netif_running(dev)) {
bb2a0f7a 8960 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8961 bnx2x_link_set(bp);
8962 }
a2fbb9ea
ET
8963
8964 return 0;
8965}
8966
01e53298
NO
8967static u32
8968bnx2x_get_link(struct net_device *dev)
8969{
8970 struct bnx2x *bp = netdev_priv(dev);
8971
8972 return bp->link_vars.link_up;
8973}
8974
a2fbb9ea
ET
8975static int bnx2x_get_eeprom_len(struct net_device *dev)
8976{
8977 struct bnx2x *bp = netdev_priv(dev);
8978
34f80b04 8979 return bp->common.flash_size;
a2fbb9ea
ET
8980}
8981
8982static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8983{
34f80b04 8984 int port = BP_PORT(bp);
a2fbb9ea
ET
8985 int count, i;
8986 u32 val = 0;
8987
8988 /* adjust timeout for emulation/FPGA */
8989 count = NVRAM_TIMEOUT_COUNT;
8990 if (CHIP_REV_IS_SLOW(bp))
8991 count *= 100;
8992
8993 /* request access to nvram interface */
8994 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8995 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8996
8997 for (i = 0; i < count*10; i++) {
8998 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8999 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9000 break;
9001
9002 udelay(5);
9003 }
9004
9005 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9006 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9007 return -EBUSY;
9008 }
9009
9010 return 0;
9011}
9012
9013static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9014{
34f80b04 9015 int port = BP_PORT(bp);
a2fbb9ea
ET
9016 int count, i;
9017 u32 val = 0;
9018
9019 /* adjust timeout for emulation/FPGA */
9020 count = NVRAM_TIMEOUT_COUNT;
9021 if (CHIP_REV_IS_SLOW(bp))
9022 count *= 100;
9023
9024 /* relinquish nvram interface */
9025 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9026 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9027
9028 for (i = 0; i < count*10; i++) {
9029 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9030 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9031 break;
9032
9033 udelay(5);
9034 }
9035
9036 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9037 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9038 return -EBUSY;
9039 }
9040
9041 return 0;
9042}
9043
9044static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9045{
9046 u32 val;
9047
9048 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9049
9050 /* enable both bits, even on read */
9051 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9052 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9053 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9054}
9055
9056static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9057{
9058 u32 val;
9059
9060 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9061
9062 /* disable both bits, even after read */
9063 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9064 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9065 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9066}
9067
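/*
 * Usage pattern (illustrative, not part of the driver): every NVRAM
 * accessor below brackets its dword operations with the software
 * arbitration lock and the access-enable bits:
 *
 *	rc = bnx2x_acquire_nvram_lock(bp);
 *	if (rc)
 *		return rc;
 *	bnx2x_enable_nvram_access(bp);
 *	... bnx2x_nvram_read_dword()/bnx2x_nvram_write_dword() ...
 *	bnx2x_disable_nvram_access(bp);
 *	bnx2x_release_nvram_lock(bp);
 */
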
4781bfad 9068static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9069 u32 cmd_flags)
9070{
f1410647 9071 int count, i, rc;
a2fbb9ea
ET
9072 u32 val;
9073
9074 /* build the command word */
9075 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9076
9077 /* need to clear DONE bit separately */
9078 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9079
9080 /* address of the NVRAM to read from */
9081 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9082 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9083
9084 /* issue a read command */
9085 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9086
9087 /* adjust timeout for emulation/FPGA */
9088 count = NVRAM_TIMEOUT_COUNT;
9089 if (CHIP_REV_IS_SLOW(bp))
9090 count *= 100;
9091
9092 /* wait for completion */
9093 *ret_val = 0;
9094 rc = -EBUSY;
9095 for (i = 0; i < count; i++) {
9096 udelay(5);
9097 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9098
9099 if (val & MCPR_NVM_COMMAND_DONE) {
9100 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9101 /* we read NVRAM data in CPU order,
9102 * but ethtool sees it as an array of bytes;
9103 * converting to big-endian does the work */
4781bfad 9104 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9105 rc = 0;
9106 break;
9107 }
9108 }
9109
9110 return rc;
9111}
9112
9113static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9114 int buf_size)
9115{
9116 int rc;
9117 u32 cmd_flags;
4781bfad 9118 __be32 val;
a2fbb9ea
ET
9119
9120 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9121 DP(BNX2X_MSG_NVM,
c14423fe 9122 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9123 offset, buf_size);
9124 return -EINVAL;
9125 }
9126
34f80b04
EG
9127 if (offset + buf_size > bp->common.flash_size) {
9128 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9129 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9130 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9131 return -EINVAL;
9132 }
9133
9134 /* request access to nvram interface */
9135 rc = bnx2x_acquire_nvram_lock(bp);
9136 if (rc)
9137 return rc;
9138
9139 /* enable access to nvram interface */
9140 bnx2x_enable_nvram_access(bp);
9141
9142 /* read the first word(s) */
9143 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9144 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9145 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9146 memcpy(ret_buf, &val, 4);
9147
9148 /* advance to the next dword */
9149 offset += sizeof(u32);
9150 ret_buf += sizeof(u32);
9151 buf_size -= sizeof(u32);
9152 cmd_flags = 0;
9153 }
9154
9155 if (rc == 0) {
9156 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9157 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9158 memcpy(ret_buf, &val, 4);
9159 }
9160
9161 /* disable access to nvram interface */
9162 bnx2x_disable_nvram_access(bp);
9163 bnx2x_release_nvram_lock(bp);
9164
9165 return rc;
9166}
9167
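/*
 * Example (illustrative, not part of the driver): reading the 4-byte
 * NVRAM signature at offset 0; offset and length must both be dword
 * aligned, otherwise the call fails with -EINVAL:
 *
 *	__be32 magic;
 *	int rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
 *	if (rc == 0 && be32_to_cpu(magic) == 0x669955aa)
 *		... NVRAM looks valid (cf. bnx2x_test_nvram below) ...
 */
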
9168static int bnx2x_get_eeprom(struct net_device *dev,
9169 struct ethtool_eeprom *eeprom, u8 *eebuf)
9170{
9171 struct bnx2x *bp = netdev_priv(dev);
9172 int rc;
9173
2add3acb
EG
9174 if (!netif_running(dev))
9175 return -EAGAIN;
9176
34f80b04 9177 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9178 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9179 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9180 eeprom->len, eeprom->len);
9181
9182 /* parameters already validated in ethtool_get_eeprom */
9183
9184 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9185
9186 return rc;
9187}
9188
9189static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9190 u32 cmd_flags)
9191{
f1410647 9192 int count, i, rc;
a2fbb9ea
ET
9193
9194 /* build the command word */
9195 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9196
9197 /* need to clear DONE bit separately */
9198 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9199
9200 /* write the data */
9201 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9202
9203 /* address of the NVRAM to write to */
9204 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9205 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9206
9207 /* issue the write command */
9208 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9209
9210 /* adjust timeout for emulation/FPGA */
9211 count = NVRAM_TIMEOUT_COUNT;
9212 if (CHIP_REV_IS_SLOW(bp))
9213 count *= 100;
9214
9215 /* wait for completion */
9216 rc = -EBUSY;
9217 for (i = 0; i < count; i++) {
9218 udelay(5);
9219 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9220 if (val & MCPR_NVM_COMMAND_DONE) {
9221 rc = 0;
9222 break;
9223 }
9224 }
9225
9226 return rc;
9227}
9228
f1410647 9229#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9230
9231static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9232 int buf_size)
9233{
9234 int rc;
9235 u32 cmd_flags;
9236 u32 align_offset;
4781bfad 9237 __be32 val;
a2fbb9ea 9238
34f80b04
EG
9239 if (offset + buf_size > bp->common.flash_size) {
9240 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9241 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9242 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9243 return -EINVAL;
9244 }
9245
9246 /* request access to nvram interface */
9247 rc = bnx2x_acquire_nvram_lock(bp);
9248 if (rc)
9249 return rc;
9250
9251 /* enable access to nvram interface */
9252 bnx2x_enable_nvram_access(bp);
9253
9254 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9255 align_offset = (offset & ~0x03);
9256 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9257
9258 if (rc == 0) {
9259 val &= ~(0xff << BYTE_OFFSET(offset));
9260 val |= (*data_buf << BYTE_OFFSET(offset));
9261
9262 /* NVRAM data is returned as an array of bytes;
9263 * convert it back to CPU order */
9264 val = be32_to_cpu(val);
9265
a2fbb9ea
ET
9266 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9267 cmd_flags);
9268 }
9269
9270 /* disable access to nvram interface */
9271 bnx2x_disable_nvram_access(bp);
9272 bnx2x_release_nvram_lock(bp);
9273
9274 return rc;
9275}
9276
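/*
 * Worked example (illustrative, not part of the driver): writing one
 * byte at the unaligned offset 0x102. The dword at align_offset 0x100
 * is read back, the target byte is spliced in and the whole dword is
 * rewritten:
 *
 *	align_offset       = 0x102 & ~0x03     = 0x100
 *	BYTE_OFFSET(0x102) = 8 * (0x102 & 3)   = 16
 *	val &= ~(0xff << 16)                   clear the old byte
 *	val |= *data_buf << 16                 insert the new byte
 */
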
9277static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9278 int buf_size)
9279{
9280 int rc;
9281 u32 cmd_flags;
9282 u32 val;
9283 u32 written_so_far;
9284
34f80b04 9285 if (buf_size == 1) /* ethtool */
a2fbb9ea 9286 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9287
9288 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9289 DP(BNX2X_MSG_NVM,
c14423fe 9290 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9291 offset, buf_size);
9292 return -EINVAL;
9293 }
9294
34f80b04
EG
9295 if (offset + buf_size > bp->common.flash_size) {
9296 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9297 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9298 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9299 return -EINVAL;
9300 }
9301
9302 /* request access to nvram interface */
9303 rc = bnx2x_acquire_nvram_lock(bp);
9304 if (rc)
9305 return rc;
9306
9307 /* enable access to nvram interface */
9308 bnx2x_enable_nvram_access(bp);
9309
9310 written_so_far = 0;
9311 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9312 while ((written_so_far < buf_size) && (rc == 0)) {
9313 if (written_so_far == (buf_size - sizeof(u32)))
9314 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9315 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9316 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9317 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9318 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9319
9320 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9321
9322 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9323
9324 /* advance to the next dword */
9325 offset += sizeof(u32);
9326 data_buf += sizeof(u32);
9327 written_so_far += sizeof(u32);
9328 cmd_flags = 0;
9329 }
9330
9331 /* disable access to nvram interface */
9332 bnx2x_disable_nvram_access(bp);
9333 bnx2x_release_nvram_lock(bp);
9334
9335 return rc;
9336}
9337
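/*
 * Illustrative sketch (not part of the driver): the FIRST/LAST flags
 * above keep each burst inside one flash page. Assuming a
 * hypothetical NVRAM_PAGE_SIZE of 256 bytes, a write covering offsets
 * 0xf8-0x107 is issued as:
 *
 *	0xf8  FIRST         start of the buffer
 *	0xfc  LAST          (0xfc + 4) % 256 == 0, page ends
 *	0x100 FIRST         0x100 % 256 == 0, new page
 *	0x104 LAST          final dword of the buffer
 */
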
9338static int bnx2x_set_eeprom(struct net_device *dev,
9339 struct ethtool_eeprom *eeprom, u8 *eebuf)
9340{
9341 struct bnx2x *bp = netdev_priv(dev);
9342 int rc;
9343
9f4c9583
EG
9344 if (!netif_running(dev))
9345 return -EAGAIN;
9346
34f80b04 9347 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9348 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9349 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9350 eeprom->len, eeprom->len);
9351
9352 /* parameters already validated in ethtool_set_eeprom */
9353
c18487ee 9354 /* If the magic number is PHY (0x00504859), upgrade the PHY FW */
34f80b04
EG
9355 if (eeprom->magic == 0x00504859)
9356 if (bp->port.pmf) {
9357
4a37fb66 9358 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
9359 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9360 bp->link_params.ext_phy_config,
9361 (bp->state != BNX2X_STATE_CLOSED),
9362 eebuf, eeprom->len);
bb2a0f7a
YG
9363 if ((bp->state == BNX2X_STATE_OPEN) ||
9364 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 9365 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 9366 &bp->link_vars, 1);
34f80b04
EG
9367 rc |= bnx2x_phy_init(&bp->link_params,
9368 &bp->link_vars);
bb2a0f7a 9369 }
4a37fb66 9370 bnx2x_release_phy_lock(bp);
34f80b04
EG
9371
9372 } else /* Only the PMF can access the PHY */
9373 return -EINVAL;
9374 else
c18487ee 9375 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9376
9377 return rc;
9378}
9379
9380static int bnx2x_get_coalesce(struct net_device *dev,
9381 struct ethtool_coalesce *coal)
9382{
9383 struct bnx2x *bp = netdev_priv(dev);
9384
9385 memset(coal, 0, sizeof(struct ethtool_coalesce));
9386
9387 coal->rx_coalesce_usecs = bp->rx_ticks;
9388 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9389
9390 return 0;
9391}
9392
ca00392c 9393#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9394static int bnx2x_set_coalesce(struct net_device *dev,
9395 struct ethtool_coalesce *coal)
9396{
9397 struct bnx2x *bp = netdev_priv(dev);
9398
9399 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9400 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9401 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9402
9403 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9404 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9405 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9406
34f80b04 9407 if (netif_running(dev))
a2fbb9ea
ET
9408 bnx2x_update_coalesce(bp);
9409
9410 return 0;
9411}
9412
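/*
 * Worked example (illustrative, not part of the driver): coalescing
 * is limited to BNX2X_MAX_COALES_TOUT = 0xf0 * 12 = 2880 us, so a
 * request such as (device name hypothetical)
 *
 *	ethtool -C eth0 rx-usecs 5000
 *
 * is silently clamped to 2880 us rather than rejected.
 */
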
9413static void bnx2x_get_ringparam(struct net_device *dev,
9414 struct ethtool_ringparam *ering)
9415{
9416 struct bnx2x *bp = netdev_priv(dev);
9417
9418 ering->rx_max_pending = MAX_RX_AVAIL;
9419 ering->rx_mini_max_pending = 0;
9420 ering->rx_jumbo_max_pending = 0;
9421
9422 ering->rx_pending = bp->rx_ring_size;
9423 ering->rx_mini_pending = 0;
9424 ering->rx_jumbo_pending = 0;
9425
9426 ering->tx_max_pending = MAX_TX_AVAIL;
9427 ering->tx_pending = bp->tx_ring_size;
9428}
9429
9430static int bnx2x_set_ringparam(struct net_device *dev,
9431 struct ethtool_ringparam *ering)
9432{
9433 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9434 int rc = 0;
a2fbb9ea
ET
9435
9436 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9437 (ering->tx_pending > MAX_TX_AVAIL) ||
9438 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9439 return -EINVAL;
9440
9441 bp->rx_ring_size = ering->rx_pending;
9442 bp->tx_ring_size = ering->tx_pending;
9443
34f80b04
EG
9444 if (netif_running(dev)) {
9445 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9446 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9447 }
9448
34f80b04 9449 return rc;
a2fbb9ea
ET
9450}
9451
9452static void bnx2x_get_pauseparam(struct net_device *dev,
9453 struct ethtool_pauseparam *epause)
9454{
9455 struct bnx2x *bp = netdev_priv(dev);
9456
356e2385
EG
9457 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9458 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9459 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9460
c0700f90
DM
9461 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9462 BNX2X_FLOW_CTRL_RX);
9463 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9464 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9465
9466 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9467 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9468 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9469}
9470
9471static int bnx2x_set_pauseparam(struct net_device *dev,
9472 struct ethtool_pauseparam *epause)
9473{
9474 struct bnx2x *bp = netdev_priv(dev);
9475
34f80b04
EG
9476 if (IS_E1HMF(bp))
9477 return 0;
9478
a2fbb9ea
ET
9479 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9480 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9481 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9482
c0700f90 9483 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9484
f1410647 9485 if (epause->rx_pause)
c0700f90 9486 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9487
f1410647 9488 if (epause->tx_pause)
c0700f90 9489 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9490
c0700f90
DM
9491 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9492 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9493
c18487ee 9494 if (epause->autoneg) {
34f80b04 9495 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9496 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9497 return -EINVAL;
9498 }
a2fbb9ea 9499
c18487ee 9500 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9501 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9502 }
a2fbb9ea 9503
c18487ee
YR
9504 DP(NETIF_MSG_LINK,
9505 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9506
9507 if (netif_running(dev)) {
bb2a0f7a 9508 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9509 bnx2x_link_set(bp);
9510 }
a2fbb9ea
ET
9511
9512 return 0;
9513}
9514
df0f2343
VZ
9515static int bnx2x_set_flags(struct net_device *dev, u32 data)
9516{
9517 struct bnx2x *bp = netdev_priv(dev);
9518 int changed = 0;
9519 int rc = 0;
9520
9521 /* TPA requires Rx CSUM offloading */
9522 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9523 if (!(dev->features & NETIF_F_LRO)) {
9524 dev->features |= NETIF_F_LRO;
9525 bp->flags |= TPA_ENABLE_FLAG;
9526 changed = 1;
9527 }
9528
9529 } else if (dev->features & NETIF_F_LRO) {
9530 dev->features &= ~NETIF_F_LRO;
9531 bp->flags &= ~TPA_ENABLE_FLAG;
9532 changed = 1;
9533 }
9534
9535 if (changed && netif_running(dev)) {
9536 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9537 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9538 }
9539
9540 return rc;
9541}
9542
a2fbb9ea
ET
9543static u32 bnx2x_get_rx_csum(struct net_device *dev)
9544{
9545 struct bnx2x *bp = netdev_priv(dev);
9546
9547 return bp->rx_csum;
9548}
9549
9550static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9551{
9552 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9553 int rc = 0;
a2fbb9ea
ET
9554
9555 bp->rx_csum = data;
df0f2343
VZ
9556
9557 /* Disable TPA when Rx CSUM is disabled; otherwise all
9558 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9559 if (!data) {
9560 u32 flags = ethtool_op_get_flags(dev);
9561
9562 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9563 }
9564
9565 return rc;
a2fbb9ea
ET
9566}
9567
9568static int bnx2x_set_tso(struct net_device *dev, u32 data)
9569{
755735eb 9570 if (data) {
a2fbb9ea 9571 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9572 dev->features |= NETIF_F_TSO6;
9573 } else {
a2fbb9ea 9574 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9575 dev->features &= ~NETIF_F_TSO6;
9576 }
9577
a2fbb9ea
ET
9578 return 0;
9579}
9580
f3c87cdd 9581static const struct {
a2fbb9ea
ET
9582 char string[ETH_GSTRING_LEN];
9583} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9584 { "register_test (offline)" },
9585 { "memory_test (offline)" },
9586 { "loopback_test (offline)" },
9587 { "nvram_test (online)" },
9588 { "interrupt_test (online)" },
9589 { "link_test (online)" },
d3d4f495 9590 { "idle check (online)" }
a2fbb9ea
ET
9591};
9592
9593static int bnx2x_self_test_count(struct net_device *dev)
9594{
9595 return BNX2X_NUM_TESTS;
9596}
9597
f3c87cdd
YG
9598static int bnx2x_test_registers(struct bnx2x *bp)
9599{
9600 int idx, i, rc = -ENODEV;
9601 u32 wr_val = 0;
9dabc424 9602 int port = BP_PORT(bp);
f3c87cdd
YG
9603 static const struct {
9604 u32 offset0;
9605 u32 offset1;
9606 u32 mask;
9607 } reg_tbl[] = {
9608/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9609 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9610 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9611 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9612 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9613 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9614 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9615 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9616 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9617 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9618/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9619 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9620 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9621 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9622 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9623 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9624 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9625 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 9626 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
9627 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9628/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
9629 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9630 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9631 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9632 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9633 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9634 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9635 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9636 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
9637 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9638/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
9639 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9640 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9641 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9642 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9643 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9644 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9645
9646 { 0xffffffff, 0, 0x00000000 }
9647 };
9648
9649 if (!netif_running(bp->dev))
9650 return rc;
9651
9652 /* Repeat the test twice:
9653 first writing 0x00000000, then writing 0xffffffff */
9654 for (idx = 0; idx < 2; idx++) {
9655
9656 switch (idx) {
9657 case 0:
9658 wr_val = 0;
9659 break;
9660 case 1:
9661 wr_val = 0xffffffff;
9662 break;
9663 }
9664
9665 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9666 u32 offset, mask, save_val, val;
f3c87cdd
YG
9667
9668 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9669 mask = reg_tbl[i].mask;
9670
9671 save_val = REG_RD(bp, offset);
9672
9673 REG_WR(bp, offset, wr_val);
9674 val = REG_RD(bp, offset);
9675
9676 /* Restore the original register's value */
9677 REG_WR(bp, offset, save_val);
9678
9679 /* verify that the value is as expected */
9680 if ((val & mask) != (wr_val & mask))
9681 goto test_reg_exit;
9682 }
9683 }
9684
9685 rc = 0;
9686
9687test_reg_exit:
9688 return rc;
9689}
9690
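/*
 * Illustrative sketch (not part of the driver): offset1 in reg_tbl[]
 * is the per-port stride, so one entry covers both ports. E.g. for
 * { DORQ_REG_DB_ADDR0, 4, 0xffffffff } on port 1 the test touches
 * DORQ_REG_DB_ADDR0 + 1*4, and only implemented bits are compared:
 *
 *	offset = reg_tbl[i].offset0 + port * reg_tbl[i].offset1;
 *	pass   = ((val & mask) == (wr_val & mask));
 */
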
9691static int bnx2x_test_memory(struct bnx2x *bp)
9692{
9693 int i, j, rc = -ENODEV;
9694 u32 val;
9695 static const struct {
9696 u32 offset;
9697 int size;
9698 } mem_tbl[] = {
9699 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9700 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9701 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9702 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9703 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9704 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9705 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9706
9707 { 0xffffffff, 0 }
9708 };
9709 static const struct {
9710 char *name;
9711 u32 offset;
9dabc424
YG
9712 u32 e1_mask;
9713 u32 e1h_mask;
f3c87cdd 9714 } prty_tbl[] = {
9dabc424
YG
9715 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9716 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9717 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9718 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9719 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9720 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9721
9722 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9723 };
9724
9725 if (!netif_running(bp->dev))
9726 return rc;
9727
9728 /* Go through all the memories */
9729 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9730 for (j = 0; j < mem_tbl[i].size; j++)
9731 REG_RD(bp, mem_tbl[i].offset + j*4);
9732
9733 /* Check the parity status */
9734 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9735 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9736 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9737 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9738 DP(NETIF_MSG_HW,
9739 "%s is 0x%x\n", prty_tbl[i].name, val);
9740 goto test_mem_exit;
9741 }
9742 }
9743
9744 rc = 0;
9745
9746test_mem_exit:
9747 return rc;
9748}
9749
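/*
 * Illustrative sketch (not part of the driver): the loop above first
 * reads every word of each memory (a read is enough to latch a parity
 * event) and then checks the parity status registers, masking bits
 * that are expected to be set on that chip; e.g. on E1:
 *
 *	val = REG_RD(bp, CCM_REG_CCM_PRTY_STS);
 *	if (val & ~0x3ffc0)           unmasked parity bit -> fail
 *		goto test_mem_exit;
 */
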
f3c87cdd
YG
9750static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9751{
9752 int cnt = 1000;
9753
9754 if (link_up)
9755 while (bnx2x_link_test(bp) && cnt--)
9756 msleep(10);
9757}
9758
9759static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9760{
9761 unsigned int pkt_size, num_pkts, i;
9762 struct sk_buff *skb;
9763 unsigned char *packet;
ca00392c
EG
9764 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9765 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
9766 u16 tx_start_idx, tx_idx;
9767 u16 rx_start_idx, rx_idx;
ca00392c 9768 u16 pkt_prod, bd_prod;
f3c87cdd 9769 struct sw_tx_bd *tx_buf;
ca00392c
EG
9770 struct eth_tx_start_bd *tx_start_bd;
9771 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
9772 dma_addr_t mapping;
9773 union eth_rx_cqe *cqe;
9774 u8 cqe_fp_flags;
9775 struct sw_rx_bd *rx_buf;
9776 u16 len;
9777 int rc = -ENODEV;
9778
b5bf9068
EG
9779 /* check the loopback mode */
9780 switch (loopback_mode) {
9781 case BNX2X_PHY_LOOPBACK:
9782 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9783 return -EINVAL;
9784 break;
9785 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9786 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9787 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9788 break;
9789 default:
f3c87cdd 9790 return -EINVAL;
b5bf9068 9791 }
f3c87cdd 9792
b5bf9068
EG
9793 /* prepare the loopback packet */
9794 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9795 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9796 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9797 if (!skb) {
9798 rc = -ENOMEM;
9799 goto test_loopback_exit;
9800 }
9801 packet = skb_put(skb, pkt_size);
9802 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
9803 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9804 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
9805 for (i = ETH_HLEN; i < pkt_size; i++)
9806 packet[i] = (unsigned char) (i & 0xff);
9807
b5bf9068 9808 /* send the loopback packet */
f3c87cdd 9809 num_pkts = 0;
ca00392c
EG
9810 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9811 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 9812
ca00392c
EG
9813 pkt_prod = fp_tx->tx_pkt_prod++;
9814 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9815 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 9816 tx_buf->skb = skb;
ca00392c 9817 tx_buf->flags = 0;
f3c87cdd 9818
ca00392c
EG
9819 bd_prod = TX_BD(fp_tx->tx_bd_prod);
9820 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
9821 mapping = pci_map_single(bp->pdev, skb->data,
9822 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
9823 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9824 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9825 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9826 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9827 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
9828 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9829 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
9830 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9831
9832 /* turn on parsing and get a BD */
9833 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9834 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
9835
9836 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 9837
58f4c4cf
EG
9838 wmb();
9839
ca00392c
EG
9840 fp_tx->tx_db.data.prod += 2;
9841 barrier();
9842 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
9843
9844 mmiowb();
9845
9846 num_pkts++;
ca00392c 9847 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
9848 bp->dev->trans_start = jiffies;
9849
9850 udelay(100);
9851
ca00392c 9852 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
9853 if (tx_idx != tx_start_idx + num_pkts)
9854 goto test_loopback_exit;
9855
ca00392c 9856 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
9857 if (rx_idx != rx_start_idx + num_pkts)
9858 goto test_loopback_exit;
9859
ca00392c 9860 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
9861 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9862 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9863 goto test_loopback_rx_exit;
9864
9865 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9866 if (len != pkt_size)
9867 goto test_loopback_rx_exit;
9868
ca00392c 9869 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
9870 skb = rx_buf->skb;
9871 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9872 for (i = ETH_HLEN; i < pkt_size; i++)
9873 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9874 goto test_loopback_rx_exit;
9875
9876 rc = 0;
9877
9878test_loopback_rx_exit:
f3c87cdd 9879
ca00392c
EG
9880 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
9881 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
9882 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
9883 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
9884
9885 /* Update producers */
ca00392c
EG
9886 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
9887 fp_rx->rx_sge_prod);
f3c87cdd
YG
9888
9889test_loopback_exit:
9890 bp->link_params.loopback_mode = LOOPBACK_NONE;
9891
9892 return rc;
9893}
9894
9895static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9896{
b5bf9068 9897 int rc = 0, res;
f3c87cdd
YG
9898
9899 if (!netif_running(bp->dev))
9900 return BNX2X_LOOPBACK_FAILED;
9901
f8ef6e44 9902 bnx2x_netif_stop(bp, 1);
3910c8ae 9903 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9904
b5bf9068
EG
9905 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9906 if (res) {
9907 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9908 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
9909 }
9910
b5bf9068
EG
9911 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9912 if (res) {
9913 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9914 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
9915 }
9916
3910c8ae 9917 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9918 bnx2x_netif_start(bp);
9919
9920 return rc;
9921}
9922
9923#define CRC32_RESIDUAL 0xdebb20e3
9924
9925static int bnx2x_test_nvram(struct bnx2x *bp)
9926{
9927 static const struct {
9928 int offset;
9929 int size;
9930 } nvram_tbl[] = {
9931 { 0, 0x14 }, /* bootstrap */
9932 { 0x14, 0xec }, /* dir */
9933 { 0x100, 0x350 }, /* manuf_info */
9934 { 0x450, 0xf0 }, /* feature_info */
9935 { 0x640, 0x64 }, /* upgrade_key_info */
9936 { 0x6a4, 0x64 }, /* upgrade_key_info (2nd block) */
9937 { 0x708, 0x70 }, /* manuf_key_info */
9938 { 0x778, 0x70 }, /* manuf_key_info (2nd block) */
9939 { 0, 0 }
9940 };
4781bfad 9941 __be32 buf[0x350 / 4];
f3c87cdd
YG
9942 u8 *data = (u8 *)buf;
9943 int i, rc;
9944 u32 magic, csum;
9945
9946 rc = bnx2x_nvram_read(bp, 0, data, 4);
9947 if (rc) {
f5372251 9948 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
9949 goto test_nvram_exit;
9950 }
9951
9952 magic = be32_to_cpu(buf[0]);
9953 if (magic != 0x669955aa) {
9954 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9955 rc = -ENODEV;
9956 goto test_nvram_exit;
9957 }
9958
9959 for (i = 0; nvram_tbl[i].size; i++) {
9960
9961 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9962 nvram_tbl[i].size);
9963 if (rc) {
9964 DP(NETIF_MSG_PROBE,
f5372251 9965 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
9966 goto test_nvram_exit;
9967 }
9968
9969 csum = ether_crc_le(nvram_tbl[i].size, data);
9970 if (csum != CRC32_RESIDUAL) {
9971 DP(NETIF_MSG_PROBE,
9972 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9973 rc = -ENODEV;
9974 goto test_nvram_exit;
9975 }
9976 }
9977
9978test_nvram_exit:
9979 return rc;
9980}
9981
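/*
 * Illustrative sketch (not part of the driver): each nvram_tbl[]
 * region carries its own little-endian CRC32 (conventionally in the
 * trailing dword), and a CRC computed over data that already includes
 * its CRC yields a constant; so for an intact region:
 *
 *	ether_crc_le(nvram_tbl[i].size, data) == CRC32_RESIDUAL
 *
 * i.e. 0xdebb20e3, and anything else is reported as -ENODEV.
 */
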
9982static int bnx2x_test_intr(struct bnx2x *bp)
9983{
9984 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9985 int i, rc;
9986
9987 if (!netif_running(bp->dev))
9988 return -ENODEV;
9989
8d9c5f34 9990 config->hdr.length = 0;
af246401
EG
9991 if (CHIP_IS_E1(bp))
9992 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9993 else
9994 config->hdr.offset = BP_FUNC(bp);
0626b899 9995 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
9996 config->hdr.reserved1 = 0;
9997
9998 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9999 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10000 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10001 if (rc == 0) {
10002 bp->set_mac_pending++;
10003 for (i = 0; i < 10; i++) {
10004 if (!bp->set_mac_pending)
10005 break;
10006 msleep_interruptible(10);
10007 }
10008 if (i == 10)
10009 rc = -ENODEV;
10010 }
10011
10012 return rc;
10013}
10014
a2fbb9ea
ET
10015static void bnx2x_self_test(struct net_device *dev,
10016 struct ethtool_test *etest, u64 *buf)
10017{
10018 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10019
10020 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10021
f3c87cdd 10022 if (!netif_running(dev))
a2fbb9ea 10023 return;
a2fbb9ea 10024
33471629 10025 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10026 if (IS_E1HMF(bp))
10027 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10028
10029 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10030 int port = BP_PORT(bp);
10031 u32 val;
f3c87cdd
YG
10032 u8 link_up;
10033
279abdf5
EG
10034 /* save current value of input enable for TX port IF */
10035 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10036 /* disable input for TX port IF */
10037 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10038
f3c87cdd
YG
10039 link_up = bp->link_vars.link_up;
10040 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10041 bnx2x_nic_load(bp, LOAD_DIAG);
10042 /* wait until link state is restored */
10043 bnx2x_wait_for_link(bp, link_up);
10044
10045 if (bnx2x_test_registers(bp) != 0) {
10046 buf[0] = 1;
10047 etest->flags |= ETH_TEST_FL_FAILED;
10048 }
10049 if (bnx2x_test_memory(bp) != 0) {
10050 buf[1] = 1;
10051 etest->flags |= ETH_TEST_FL_FAILED;
10052 }
10053 buf[2] = bnx2x_test_loopback(bp, link_up);
10054 if (buf[2] != 0)
10055 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10056
f3c87cdd 10057 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10058
10059 /* restore input for TX port IF */
10060 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10061
f3c87cdd
YG
10062 bnx2x_nic_load(bp, LOAD_NORMAL);
10063 /* wait until link state is restored */
10064 bnx2x_wait_for_link(bp, link_up);
10065 }
10066 if (bnx2x_test_nvram(bp) != 0) {
10067 buf[3] = 1;
a2fbb9ea
ET
10068 etest->flags |= ETH_TEST_FL_FAILED;
10069 }
f3c87cdd
YG
10070 if (bnx2x_test_intr(bp) != 0) {
10071 buf[4] = 1;
10072 etest->flags |= ETH_TEST_FL_FAILED;
10073 }
10074 if (bp->port.pmf)
10075 if (bnx2x_link_test(bp) != 0) {
10076 buf[5] = 1;
10077 etest->flags |= ETH_TEST_FL_FAILED;
10078 }
f3c87cdd
YG
10079
10080#ifdef BNX2X_EXTRA_DEBUG
10081 bnx2x_panic_dump(bp);
10082#endif
a2fbb9ea
ET
10083}
10084
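/*
 * Illustrative sketch (not part of the driver): the buf[] slots
 * filled above line up with bnx2x_tests_str_arr[], which ethtool
 * prints next to each result:
 *
 *	buf[0] register_test    buf[1] memory_test    buf[2] loopback_test
 *	buf[3] nvram_test       buf[4] interrupt_test buf[5] link_test
 *
 * buf[6] ("idle check") is left at zero by this function, and the
 * offline tests (0-2) are skipped in E1H multi-function mode.
 */
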
de832a55
EG
10085static const struct {
10086 long offset;
10087 int size;
10088 u8 string[ETH_GSTRING_LEN];
10089} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10090/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10091 { Q_STATS_OFFSET32(error_bytes_received_hi),
10092 8, "[%d]: rx_error_bytes" },
10093 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10094 8, "[%d]: rx_ucast_packets" },
10095 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10096 8, "[%d]: rx_mcast_packets" },
10097 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10098 8, "[%d]: rx_bcast_packets" },
10099 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10100 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10101 4, "[%d]: rx_phy_ip_err_discards"},
10102 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10103 4, "[%d]: rx_skb_alloc_discard" },
10104 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10105
10106/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10107 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10108 8, "[%d]: tx_packets" }
10109};
10110
bb2a0f7a
YG
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT	1
#define STATS_FLAGS_FUNC	2
#define STATS_FLAGS_BOTH	(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
				4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

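/* Note: port stats come from the port-wide MAC counters shared by all
 * functions on the port, so in E1H multi-function mode they are filtered
 * out of the per-function ethtool report (see IS_E1HMF_MODE_STAT above)
 * unless BNX2X_MSG_STATS debugging is enabled.
 */
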
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_rx_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

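/* 8-byte counters are kept as two consecutive 32-bit words ({hi, lo});
 * HILO_U64() recombines them into a single u64 for ethtool below.
 */
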
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_rx_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

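/* Blink the port LED for identification: `data' is the requested duration
 * in seconds (a request of 0 is treated as 2 seconds here), and the loop
 * below toggles the LED between OPER and OFF every 500ms.
 */
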
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

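/* Program the PCI power-management control/status register directly:
 * the low two bits of PMCSR select the power state (0 = D0, 3 = D3hot),
 * and PME is enabled on the way down only if Wake-on-LAN is configured.
 */
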
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

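/* The last entry of each RCQ page holds the "next page" pointer rather
 * than a completion, which is why the consumer index read from the status
 * block is bumped past it below before comparing with our cached consumer.
 */
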
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}


/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}

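/* The hardware checksum may be computed from a slightly wrong starting
 * offset (see the "HW bug" note in bnx2x_start_xmit below); `fix' is the
 * signed byte delta.  bnx2x_csum_fix() subtracts (fix > 0) or adds back
 * (fix < 0) the partial sum over those bytes, refolds the result and
 * swaps it into the byte order the parsing BD expects.
 */
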
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

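/* The firmware can fetch at most MAX_FETCH_BD descriptors per packet, and
 * for an LSO frame every window of (MAX_FETCH_BD - 3) adjacent frags must
 * carry at least one full MSS of payload.  bnx2x_pkt_req_lin() slides that
 * window over the frag list and asks for skb_linearize() when a window
 * comes up short.
 */
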
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* a non-LSO packet that is too fragmented
			   must always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp, *fp_stat;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index + bp->num_rx_queues];
	fp_stat = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp_stat->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pbd sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		fp_stat->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp_stat->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

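/* Multicast filtering differs by chip: E1 programs each address into the
 * MAC CAM table via a slowpath ramrod, while E1H hashes the address (top
 * byte of its crc32c, i.e. 256 bins) into the MC_HASH register filter.
 */
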
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

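/* The firmware file starts with a header of {offset, len} section
 * descriptors.  Every descriptor, and every init_ops offset it points at,
 * is bounds-checked against the file size before being dereferenced, and
 * the embedded FW version must match what this driver was built against.
 */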
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

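/* BNX2X_ALLOC_AND_SET pulls one init array out of the firmware blob: it
 * allocates bp->arr and uses `func' (one of the byte-swap helpers above)
 * to copy the big-endian section at fw_hdr->arr.offset into it in host
 * byte order, bailing out to `lbl' on allocation failure.
 */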
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

YG
11819static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11820{
11821 int i;
11822
11823 bp->state = BNX2X_STATE_ERROR;
11824
11825 bp->rx_mode = BNX2X_RX_MODE_NONE;
11826
11827 bnx2x_netif_stop(bp, 0);
11828
11829 del_timer_sync(&bp->timer);
11830 bp->stats_state = STATS_STATE_DISABLED;
11831 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11832
11833 /* Release IRQs */
11834 bnx2x_free_irq(bp);
11835
11836 if (CHIP_IS_E1(bp)) {
11837 struct mac_configuration_cmd *config =
11838 bnx2x_sp(bp, mcast_config);
11839
8d9c5f34 11840 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
11841 CAM_INVALIDATE(config->config_table[i]);
11842 }
11843
11844 /* Free SKBs, SGEs, TPA pool and driver internals */
11845 bnx2x_free_skbs(bp);
555f6c78 11846 for_each_rx_queue(bp, i)
f8ef6e44 11847 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11848 for_each_rx_queue(bp, i)
7cde1c8b 11849 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11850 bnx2x_free_mem(bp);
11851
11852 bp->state = BNX2X_STATE_CLOSED;
11853
11854 netif_carrier_off(bp->dev);
11855
11856 return 0;
11857}
11858
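/* Re-establish contact with the management firmware (MCP) after a
 * slot reset: re-read the shared-memory base, verify the validity
 * signature and resync the driver/firmware mailbox sequence number.
 * If the shmem base looks bogus, the MCP is assumed dead and
 * NO_MCP_FLAG is set so that later code skips firmware handshakes.
 */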
11859static void bnx2x_eeh_recover(struct bnx2x *bp)
11860{
11861 u32 val;
11862
11863 mutex_init(&bp->port.phy_mutex);
11864
11865 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11866 bp->link_params.shmem_base = bp->common.shmem_base;
11867 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11868
11869 if (!bp->common.shmem_base ||
11870 (bp->common.shmem_base < 0xA0000) ||
11871 (bp->common.shmem_base >= 0xC0000)) {
11872 BNX2X_DEV_INFO("MCP not active\n");
11873 bp->flags |= NO_MCP_FLAG;
11874 return;
11875 }
11876
11877 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11878 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11879 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11880 BNX2X_ERR("BAD MCP validity signature\n");
11881
11882 if (!BP_NOMCP(bp)) {
11883 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11884 & DRV_MSG_SEQ_NUMBER_MASK);
11885 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11886 }
11887}
11888
493adb1f
WX
11889/**
11890 * bnx2x_io_error_detected - called when PCI error is detected
11891 * @pdev: Pointer to PCI device
 11892 * @state: The current PCI connection state
11893 *
11894 * This function is called after a PCI bus error affecting
11895 * this device has been detected.
11896 */
11897static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11898 pci_channel_state_t state)
11899{
11900 struct net_device *dev = pci_get_drvdata(pdev);
11901 struct bnx2x *bp = netdev_priv(dev);
11902
11903 rtnl_lock();
11904
11905 netif_device_detach(dev);
11906
07ce50e4
DN
11907 if (state == pci_channel_io_perm_failure) {
11908 rtnl_unlock();
11909 return PCI_ERS_RESULT_DISCONNECT;
11910 }
11911
493adb1f 11912 if (netif_running(dev))
f8ef6e44 11913 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11914
11915 pci_disable_device(pdev);
11916
11917 rtnl_unlock();
11918
11919 /* Request a slot reset */
11920 return PCI_ERS_RESULT_NEED_RESET;
11921}
11922
11923/**
11924 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11925 * @pdev: Pointer to PCI device
11926 *
 11928 * Restart the card from scratch, as if from a cold boot.
11928 */
11929static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11930{
11931 struct net_device *dev = pci_get_drvdata(pdev);
11932 struct bnx2x *bp = netdev_priv(dev);
11933
11934 rtnl_lock();
11935
11936 if (pci_enable_device(pdev)) {
11937 dev_err(&pdev->dev,
11938 "Cannot re-enable PCI device after reset\n");
11939 rtnl_unlock();
11940 return PCI_ERS_RESULT_DISCONNECT;
11941 }
11942
11943 pci_set_master(pdev);
11944 pci_restore_state(pdev);
11945
11946 if (netif_running(dev))
11947 bnx2x_set_power_state(bp, PCI_D0);
11948
11949 rtnl_unlock();
11950
11951 return PCI_ERS_RESULT_RECOVERED;
11952}
11953
11954/**
11955 * bnx2x_io_resume - called when traffic can start flowing again
11956 * @pdev: Pointer to PCI device
11957 *
11958 * This callback is called when the error recovery driver tells us that
 11959 * it's OK to resume normal operation.
11960 */
11961static void bnx2x_io_resume(struct pci_dev *pdev)
11962{
11963 struct net_device *dev = pci_get_drvdata(pdev);
11964 struct bnx2x *bp = netdev_priv(dev);
11965
11966 rtnl_lock();
11967
f8ef6e44
YG
11968 bnx2x_eeh_recover(bp);
11969
493adb1f 11970 if (netif_running(dev))
f8ef6e44 11971 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11972
11973 netif_device_attach(dev);
11974
11975 rtnl_unlock();
11976}
11977
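/* The PCI core drives these hooks in a fixed order once an error
 * (e.g. an EEH event on powerpc) is reported against the device:
 *
 *   error_detected() - returns PCI_ERS_RESULT_NEED_RESET, or
 *                      PCI_ERS_RESULT_DISCONNECT on permanent failure
 *   slot_reset()     - re-enables and restores the device after the
 *                      bus/slot reset
 *   resume()         - reloads the NIC and lets traffic flow again
 */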
11978static struct pci_error_handlers bnx2x_err_handler = {
11979 .error_detected = bnx2x_io_error_detected,
356e2385
EG
11980 .slot_reset = bnx2x_io_slot_reset,
11981 .resume = bnx2x_io_resume,
493adb1f
WX
11982};
11983
a2fbb9ea 11984static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11985 .name = DRV_MODULE_NAME,
11986 .id_table = bnx2x_pci_tbl,
11987 .probe = bnx2x_init_one,
11988 .remove = __devexit_p(bnx2x_remove_one),
11989 .suspend = bnx2x_suspend,
11990 .resume = bnx2x_resume,
11991 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11992};
11993
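/* Module init creates the slow-path workqueue before registering the
 * PCI driver, since probe (and with it the first queued work) can run
 * from within pci_register_driver() itself; if registration fails,
 * the workqueue is destroyed again so the module does not leak it.
 */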
11994static int __init bnx2x_init(void)
11995{
dd21ca6d
SG
11996 int ret;
11997
1cf167f2
EG
11998 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11999 if (bnx2x_wq == NULL) {
12000 printk(KERN_ERR PFX "Cannot create workqueue\n");
12001 return -ENOMEM;
12002 }
12003
dd21ca6d
SG
12004 ret = pci_register_driver(&bnx2x_pci_driver);
12005 if (ret) {
12006 printk(KERN_ERR PFX "Cannot register driver\n");
12007 destroy_workqueue(bnx2x_wq);
12008 }
12009 return ret;
a2fbb9ea
ET
12010}
12011
12012static void __exit bnx2x_cleanup(void)
12013{
12014 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
12015
12016 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
12017}
12018
12019module_init(bnx2x_init);
12020module_exit(bnx2x_cleanup);
12021
94a78b79 12022