bnx2x: Stop management traffic in loopback test
[deliverable/linux.git] / drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.105-1"
#define DRV_MODULE_RELDATE	"2009/04/22"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

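/*
 * Usage sketch (illustrative comment, not part of the original source):
 * the variables above are module parameters, so they are normally set at
 * load time, e.g.
 *
 *	modprobe bnx2x multi_mode=0 disable_tpa=1 int_mode=2
 *
 * which, per the descriptions above, disables the per-CPU queues and the
 * TPA (LRO) feature and forces MSI interrupt mode.
 */
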
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

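/*
 * Usage sketch (illustrative comment, not part of the original source):
 * the two helpers above tunnel GRC accesses through the PCI config window,
 * so a read-modify-write of an arbitrary GRC address at init time would
 * look like this (some_grc_addr and some_bit are hypothetical):
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, some_grc_addr);
 *	bnx2x_reg_wr_ind(bp, some_grc_addr, val | some_bit);
 *
 * As the comment above notes, this is only safe at init time, when the
 * MCP serializes access to the window.
 */
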
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

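/*
 * Usage sketch (illustrative comment, not part of the original source):
 * callers hand bnx2x_write_dmae() a DMA-coherent source buffer and a GRC
 * destination, e.g. copying four dwords of slowpath data (wb_data is a
 * real slowpath field, the destination address is a placeholder):
 *
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
 *			 some_grc_dst_addr, 4);
 *
 * The call sleeps on bp->dmae_mutex, so it must not be used in atomic
 * context.
 */
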
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

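/*
 * Usage sketch (illustrative comment, not part of the original source):
 * bnx2x_wb_wr() above is how 64-bit "wide-bus" registers are written as a
 * hi/lo dword pair over DMAE, e.g. (the register name is a placeholder):
 *
 *	bnx2x_wb_wr(bp, SOME_WB_REG, U64_HI(mapping), U64_LO(mapping));
 */
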
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

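/*
 * Summary (added comment, not in the original source): bnx2x_int_enable()
 * programs the HC into one of three modes - MSI-X (per-queue vectors),
 * MSI (single vector, single ISR) or INTx (legacy line interrupt). Note
 * that the INTx path writes the configuration twice, the second time with
 * the MSI/MSI-X enable bit cleared.
 */
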
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

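/*
 * Usage sketch (illustrative comment, not part of the original source):
 * the fastpath ISRs below ack their status block and mask further
 * interrupts before scheduling NAPI, e.g.
 *
 *	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 *
 * and the poll loop re-enables with IGU_INT_ENABLE (and the latest
 * indices) when it is done.
 */
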
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

794
a2fbb9ea
ET
795/* free skb in the packet ring at pos idx
796 * return idx of last bd freed
797 */
798static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
799 u16 idx)
800{
801 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
802 struct eth_tx_bd *tx_bd;
803 struct sk_buff *skb = tx_buf->skb;
34f80b04 804 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
805 int nbd;
806
807 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
808 idx, tx_buf, skb);
809
810 /* unmap first bd */
811 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
812 tx_bd = &fp->tx_desc_ring[bd_idx];
813 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
814 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
815
816 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 817 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
818#ifdef BNX2X_STOP_ON_ERROR
819 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 820 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
821 bnx2x_panic();
822 }
823#endif
824
825 /* Skip a parse bd and the TSO split header bd
826 since they have no mapping */
827 if (nbd)
828 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
829
830 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
831 ETH_TX_BD_FLAGS_TCP_CSUM |
832 ETH_TX_BD_FLAGS_SW_LSO)) {
833 if (--nbd)
834 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
835 tx_bd = &fp->tx_desc_ring[bd_idx];
836 /* is this a TSO split header bd? */
837 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
838 if (--nbd)
839 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
840 }
841 }
842
843 /* now free frags */
844 while (nbd > 0) {
845
846 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
847 tx_bd = &fp->tx_desc_ring[bd_idx];
848 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
849 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
850 if (--nbd)
851 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
852 }
853
854 /* release skb */
53e5e96e 855 WARN_ON(!skb);
a2fbb9ea
ET
856 dev_kfree_skb(skb);
857 tx_buf->first_bd = 0;
858 tx_buf->skb = NULL;
859
34f80b04 860 return new_cons;
a2fbb9ea
ET
861}
862
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

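/*
 * Worked example (added comment, hypothetical numbers): with prod = 10,
 * cons = 8 and NUM_TX_RINGS = 16, used becomes (10 - 8) + 16 = 18, so 18
 * descriptors are treated as consumed - the "next-page" entries are
 * deliberately counted as always in use - and tx_ring_size - 18
 * descriptors are reported available. SUB_S16() keeps the subtraction
 * correct when prod has wrapped below cons.
 */
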
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

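/*
 * Note (added comment, not in the original source): fp->sge_mask tracks
 * the SGE ring occupancy with one bit per ring entry, packed into u64
 * elements. bnx2x_clear_sge_mask_next_elems() clears the two last bits of
 * every ring page because those slots hold the "next page" pointers and
 * never carry a buffer.
 */
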
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

1323
1324static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1325 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1326 u16 cqe_idx)
1327{
1328 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1329 struct sk_buff *skb = rx_buf->skb;
1330 /* alloc new skb */
1331 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1332
1333 /* Unmap skb in the pool anyway, as we are going to change
1334 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1335 fails. */
1336 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1337 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1338
7a9b2557 1339 if (likely(new_skb)) {
66e855f3
YG
1340 /* fix ip xsum and give it to the stack */
1341 /* (no need to map the new skb) */
0c6671b0
EG
1342#ifdef BCM_VLAN
1343 int is_vlan_cqe =
1344 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1345 PARSING_FLAGS_VLAN);
1346 int is_not_hwaccel_vlan_cqe =
1347 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1348#endif
7a9b2557
VZ
1349
1350 prefetch(skb);
1351 prefetch(((char *)(skb)) + 128);
1352
7a9b2557
VZ
1353#ifdef BNX2X_STOP_ON_ERROR
1354 if (pad + len > bp->rx_buf_size) {
1355 BNX2X_ERR("skb_put is about to fail... "
1356 "pad %d len %d rx_buf_size %d\n",
1357 pad, len, bp->rx_buf_size);
1358 bnx2x_panic();
1359 return;
1360 }
1361#endif
1362
1363 skb_reserve(skb, pad);
1364 skb_put(skb, len);
1365
1366 skb->protocol = eth_type_trans(skb, bp->dev);
1367 skb->ip_summed = CHECKSUM_UNNECESSARY;
1368
1369 {
1370 struct iphdr *iph;
1371
1372 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1373#ifdef BCM_VLAN
1374 /* If there is no Rx VLAN offloading -
1375 take VLAN tag into an account */
1376 if (unlikely(is_not_hwaccel_vlan_cqe))
1377 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1378#endif
7a9b2557
VZ
1379 iph->check = 0;
1380 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1381 }
1382
1383 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1384 &cqe->fast_path_cqe, cqe_idx)) {
1385#ifdef BCM_VLAN
0c6671b0
EG
1386 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1387 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1388 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1389 le16_to_cpu(cqe->fast_path_cqe.
1390 vlan_tag));
1391 else
1392#endif
1393 netif_receive_skb(skb);
1394 } else {
1395 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1396 " - dropping packet!\n");
1397 dev_kfree_skb(skb);
1398 }
1399
7a9b2557
VZ
1400
1401 /* put new skb in bin */
1402 fp->tpa_pool[queue].skb = new_skb;
1403
1404 } else {
66e855f3 1405 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1406 DP(NETIF_MSG_RX_STATUS,
1407 "Failed to allocate new skb - dropping packet!\n");
de832a55 1408 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1409 }
1410
1411 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1412}
1413
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

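/*
 * Usage sketch (illustrative comment, not part of the original source):
 * the Rx path below refills the rings and then publishes all three
 * producers in one shot, e.g.
 *
 *	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 *			     fp->rx_sge_prod);
 *
 * The wmb() inside guarantees the firmware never sees a producer that
 * points at a BD whose buffer address has not been written yet.
 */
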
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

1671
1672static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1673{
1674 struct bnx2x_fastpath *fp = fp_cookie;
1675 struct bnx2x *bp = fp->bp;
0626b899 1676 int index = fp->index;
a2fbb9ea 1677
da5a662a
VZ
1678 /* Return here if interrupt is disabled */
1679 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1680 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1681 return IRQ_HANDLED;
1682 }
1683
34f80b04 1684 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
0626b899
EG
1685 index, fp->sb_id);
1686 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1687
1688#ifdef BNX2X_STOP_ON_ERROR
1689 if (unlikely(bp->panic))
1690 return IRQ_HANDLED;
1691#endif
1692
1693 prefetch(fp->rx_cons_sb);
1694 prefetch(fp->tx_cons_sb);
1695 prefetch(&fp->status_blk->c_status_block.status_block_index);
1696 prefetch(&fp->status_blk->u_status_block.status_block_index);
1697
288379f0 1698 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1699
a2fbb9ea
ET
1700 return IRQ_HANDLED;
1701}
1702
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

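/*
 * Note on the INT#x/MSI status word used above (inferred from the masking
 * logic): bit 0 signals the default (slow path) status block, while each
 * fastpath status block contributes the bit (0x2 << sb_id).  bnx2x_ack_int()
 * reads and clears the whole word, so the handler must dispatch every set
 * bit before returning.
 */
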
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

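/*
 * Typical pairing of the PHY lock helpers (a sketch; this is the pattern
 * bnx2x_link_set() below uses):
 *
 *	bnx2x_acquire_phy_lock(bp);
 *	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
 *	bnx2x_release_phy_lock(bp);
 */
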
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

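/*
 * Example use of bnx2x_set_gpio() (taken from the fan-failure handling
 * further down): drive GPIO 1 low on the given port to hold the PHY in
 * reset:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 */
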
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

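/*
 * The mapping implemented by bnx2x_calc_fc_adv() above, in table form:
 *
 *	negotiated pause resolution	advertised ethtool flags
 *	NONE (and default)		neither Pause nor Asym_Pause
 *	BOTH				Pause | Asym_Pause
 *	ASYMMETRIC			Asym_Pause only
 */
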
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

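/*
 * Worked example for the calculations above (illustrative numbers only):
 * at a 10000 Mbps line rate, r_param = 10000 / 8 = 1250 bytes/usec, so
 * rs_threshold = RS_PERIODIC_TIMEOUT_USEC * 1250 * 5 / 4, and per the
 * inline comment t_fair = T_FAIR_COEF / 10000 works out to 1000 usec -
 * the fairness period shrinks as the line speed grows.
 */
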
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

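/*
 * Note on units in bnx2x_init_vn_minmax(): the MIN_BW/MAX_BW fields in the
 * MF configuration are in units of 100 Mbps, hence the "* 100" above -
 * e.g. a raw field value of 25 means a 2500 Mbps rate.
 */
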
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

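/*
 * Example call of bnx2x_sp_post() (this is exactly how the statistics
 * ramrod is posted later in this file):
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */
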
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

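/*
 * The return value of bnx2x_update_dsb_idx() is a bitmask of which default
 * status block indices changed: bit 0 - attention bits, bit 1 - CSTORM,
 * bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 */
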
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage.  Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

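/*
 * Edge detection in bnx2x_attn_int(), by example: with attn_state = 0x1
 * (bit 0 already handled), attn_bits = 0x3 and attn_ack = 0x1, we get
 * asserted = 0x3 & ~0x1 & ~0x1 = 0x2 (one newly raised attention) and
 * deasserted = ~0x3 & 0x1 & 0x1 = 0x0 (nothing cleared yet).
 */
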
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

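/*
 * Worked example for DIFF_64: with minuend 0x1:0x00000000 and subtrahend
 * 0x0:0x00000001, m_lo < s_lo triggers the underflow branch; d_hi becomes
 * 1 - 0 - 1 = 0 (the "loan") and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * i.e. the expected 64-bit result 0x00000000ffffffff.  ADD_64 handles the
 * mirror case: a carry is added to the high word whenever the low-word sum
 * wraps (s_lo < a_lo after the addition).
 */
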
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

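/*
 * Caveat (visible in the #if above): on 32-bit platforms bnx2x_hilo()
 * returns only the low 32 bits of the statistic, since a long cannot hold
 * the full 64-bit value there.
 */
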
/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

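/*
 * Note on the two DMAE commands built above: a single DMAE transfer is
 * limited to DMAE_LEN32_RD_MAX dwords, so the port statistics block is
 * fetched in two chunks - the second command starts DMAE_LEN32_RD_MAX
 * dwords into the source and DMAE_LEN32_RD_MAX * 4 bytes into the
 * destination buffer.
 */
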
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

bb2a0f7a 3490static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3491{
bb2a0f7a
YG
3492 struct dmae_command *dmae = &bp->stats_dmae;
3493 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3494
bb2a0f7a
YG
3495 /* sanity */
3496 if (!bp->func_stx) {
3497 BNX2X_ERR("BUG!\n");
3498 return;
3499 }
a2fbb9ea 3500
bb2a0f7a
YG
3501 bp->executer_idx = 0;
3502 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3503
bb2a0f7a
YG
3504 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3505 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3506 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3507#ifdef __BIG_ENDIAN
3508 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3509#else
3510 DMAE_CMD_ENDIANITY_DW_SWAP |
3511#endif
3512 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3513 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3514 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3515 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3516 dmae->dst_addr_lo = bp->func_stx >> 2;
3517 dmae->dst_addr_hi = 0;
3518 dmae->len = sizeof(struct host_func_stats) >> 2;
3519 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3520 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3521 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3522
bb2a0f7a
YG
3523 *stats_comp = 0;
3524}
a2fbb9ea 3525
bb2a0f7a
YG
3526static void bnx2x_stats_start(struct bnx2x *bp)
3527{
3528 if (bp->port.pmf)
3529 bnx2x_port_stats_init(bp);
3530
3531 else if (bp->func_stx)
3532 bnx2x_func_stats_init(bp);
3533
3534 bnx2x_hw_stats_post(bp);
3535 bnx2x_storm_stats_post(bp);
3536}
3537
3538static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3539{
3540 bnx2x_stats_comp(bp);
3541 bnx2x_stats_pmf_update(bp);
3542 bnx2x_stats_start(bp);
3543}
3544
3545static void bnx2x_stats_restart(struct bnx2x *bp)
3546{
3547 bnx2x_stats_comp(bp);
3548 bnx2x_stats_start(bp);
3549}
3550
3551static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3552{
3553 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3554 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3555 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3556 struct {
3557 u32 lo;
3558 u32 hi;
3559 } diff;
bb2a0f7a
YG
3560
3561 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3562 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3563 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3564 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3565 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3566 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3567 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3568 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3569 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3570 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3571 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3572 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3573 UPDATE_STAT64(tx_stat_gt127,
3574 tx_stat_etherstatspkts65octetsto127octets);
3575 UPDATE_STAT64(tx_stat_gt255,
3576 tx_stat_etherstatspkts128octetsto255octets);
3577 UPDATE_STAT64(tx_stat_gt511,
3578 tx_stat_etherstatspkts256octetsto511octets);
3579 UPDATE_STAT64(tx_stat_gt1023,
3580 tx_stat_etherstatspkts512octetsto1023octets);
3581 UPDATE_STAT64(tx_stat_gt1518,
3582 tx_stat_etherstatspkts1024octetsto1522octets);
3583 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3584 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3585 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3586 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3587 UPDATE_STAT64(tx_stat_gterr,
3588 tx_stat_dot3statsinternalmactransmiterrors);
3589 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3590
3591 estats->pause_frames_received_hi =
3592 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3593 estats->pause_frames_received_lo =
3594 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3595
3596 estats->pause_frames_sent_hi =
3597 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3598 estats->pause_frames_sent_lo =
3599 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3600}
3601
3602static void bnx2x_emac_stats_update(struct bnx2x *bp)
3603{
3604 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3605 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3606 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3607
3608 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3609 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3610 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3611 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3612 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3613 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3614 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3615 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3616 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3617 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3618 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3619 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3620 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3621 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3622 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3623 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3624 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3625 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3626 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3627 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3628 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3629 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3630 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3631 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3632 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3633 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3634 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3635 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3636 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3637 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3638 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3639
3640 estats->pause_frames_received_hi =
3641 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3642 estats->pause_frames_received_lo =
3643 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3644 ADD_64(estats->pause_frames_received_hi,
3645 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3646 estats->pause_frames_received_lo,
3647 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3648
3649 estats->pause_frames_sent_hi =
3650 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3651 estats->pause_frames_sent_lo =
3652 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3653 ADD_64(estats->pause_frames_sent_hi,
3654 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3655 estats->pause_frames_sent_lo,
3656 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3657}
3658
3659static int bnx2x_hw_stats_update(struct bnx2x *bp)
3660{
3661 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3662 struct nig_stats *old = &(bp->port.old_nig_stats);
3663 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3664 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3665 struct {
3666 u32 lo;
3667 u32 hi;
3668 } diff;
de832a55 3669 u32 nig_timer_max;
bb2a0f7a
YG
3670
3671 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3672 bnx2x_bmac_stats_update(bp);
3673
3674 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3675 bnx2x_emac_stats_update(bp);
3676
3677 else { /* unreached */
c3eefaf6 3678 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
3679 return -1;
3680 }
a2fbb9ea 3681
bb2a0f7a
YG
3682 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3683 new->brb_discard - old->brb_discard);
66e855f3
YG
3684 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3685 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3686
bb2a0f7a
YG
3687 UPDATE_STAT64_NIG(egress_mac_pkt0,
3688 etherstatspkts1024octetsto1522octets);
3689 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3690
bb2a0f7a 3691 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3692
bb2a0f7a
YG
3693 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3694 sizeof(struct mac_stx));
3695 estats->brb_drop_hi = pstats->brb_drop_hi;
3696 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3697
bb2a0f7a 3698 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3699
de832a55
EG
3700 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3701 if (nig_timer_max != estats->nig_timer_max) {
3702 estats->nig_timer_max = nig_timer_max;
3703 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3704 }
3705
bb2a0f7a 3706 return 0;
a2fbb9ea
ET
3707}
3708
bb2a0f7a 3709static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3710{
3711 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3712 struct tstorm_per_port_stats *tport =
de832a55 3713 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3714 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3715 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3716 int i;
3717
3718 memset(&(fstats->total_bytes_received_hi), 0,
3719 sizeof(struct host_func_stats) - 2*sizeof(u32));
3720 estats->error_bytes_received_hi = 0;
3721 estats->error_bytes_received_lo = 0;
3722 estats->etherstatsoverrsizepkts_hi = 0;
3723 estats->etherstatsoverrsizepkts_lo = 0;
3724 estats->no_buff_discard_hi = 0;
3725 estats->no_buff_discard_lo = 0;
a2fbb9ea 3726
de832a55
EG
3727 for_each_queue(bp, i) {
3728 struct bnx2x_fastpath *fp = &bp->fp[i];
3729 int cl_id = fp->cl_id;
3730 struct tstorm_per_client_stats *tclient =
3731 &stats->tstorm_common.client_statistics[cl_id];
3732 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3733 struct ustorm_per_client_stats *uclient =
3734 &stats->ustorm_common.client_statistics[cl_id];
3735 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3736 struct xstorm_per_client_stats *xclient =
3737 &stats->xstorm_common.client_statistics[cl_id];
3738 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3739 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3740 u32 diff;
3741
3742 /* are storm stats valid? */
3743 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3744 bp->stats_counter) {
de832a55
EG
3745 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3746 " xstorm counter (%d) != stats_counter (%d)\n",
3747 i, xclient->stats_counter, bp->stats_counter);
3748 return -1;
3749 }
3750 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3751 bp->stats_counter) {
de832a55
EG
3752 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3753 " tstorm counter (%d) != stats_counter (%d)\n",
3754 i, tclient->stats_counter, bp->stats_counter);
3755 return -2;
3756 }
3757 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3758 bp->stats_counter) {
3759 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3760 " ustorm counter (%d) != stats_counter (%d)\n",
3761 i, uclient->stats_counter, bp->stats_counter);
3762 return -4;
3763 }
a2fbb9ea 3764
de832a55
EG
3765 qstats->total_bytes_received_hi =
3766 qstats->valid_bytes_received_hi =
a2fbb9ea 3767 le32_to_cpu(tclient->total_rcv_bytes.hi);
de832a55
EG
3768 qstats->total_bytes_received_lo =
3769 qstats->valid_bytes_received_lo =
a2fbb9ea 3770 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3771
de832a55 3772 qstats->error_bytes_received_hi =
bb2a0f7a 3773 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3774 qstats->error_bytes_received_lo =
bb2a0f7a 3775 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3776
de832a55
EG
3777 ADD_64(qstats->total_bytes_received_hi,
3778 qstats->error_bytes_received_hi,
3779 qstats->total_bytes_received_lo,
3780 qstats->error_bytes_received_lo);
3781
3782 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3783 total_unicast_packets_received);
3784 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3785 total_multicast_packets_received);
3786 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3787 total_broadcast_packets_received);
3788 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3789 etherstatsoverrsizepkts);
3790 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3791
3792 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3793 total_unicast_packets_received);
3794 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3795 total_multicast_packets_received);
3796 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3797 total_broadcast_packets_received);
3798 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3799 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3800 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3801
3802 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3803 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3804 qstats->total_bytes_transmitted_lo =
bb2a0f7a
YG
3805 le32_to_cpu(xclient->total_sent_bytes.lo);
3806
de832a55
EG
3807 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3808 total_unicast_packets_transmitted);
3809 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3810 total_multicast_packets_transmitted);
3811 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3812 total_broadcast_packets_transmitted);
3813
3814 old_tclient->checksum_discard = tclient->checksum_discard;
3815 old_tclient->ttl0_discard = tclient->ttl0_discard;
3816
3817 ADD_64(fstats->total_bytes_received_hi,
3818 qstats->total_bytes_received_hi,
3819 fstats->total_bytes_received_lo,
3820 qstats->total_bytes_received_lo);
3821 ADD_64(fstats->total_bytes_transmitted_hi,
3822 qstats->total_bytes_transmitted_hi,
3823 fstats->total_bytes_transmitted_lo,
3824 qstats->total_bytes_transmitted_lo);
3825 ADD_64(fstats->total_unicast_packets_received_hi,
3826 qstats->total_unicast_packets_received_hi,
3827 fstats->total_unicast_packets_received_lo,
3828 qstats->total_unicast_packets_received_lo);
3829 ADD_64(fstats->total_multicast_packets_received_hi,
3830 qstats->total_multicast_packets_received_hi,
3831 fstats->total_multicast_packets_received_lo,
3832 qstats->total_multicast_packets_received_lo);
3833 ADD_64(fstats->total_broadcast_packets_received_hi,
3834 qstats->total_broadcast_packets_received_hi,
3835 fstats->total_broadcast_packets_received_lo,
3836 qstats->total_broadcast_packets_received_lo);
3837 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3838 qstats->total_unicast_packets_transmitted_hi,
3839 fstats->total_unicast_packets_transmitted_lo,
3840 qstats->total_unicast_packets_transmitted_lo);
3841 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3842 qstats->total_multicast_packets_transmitted_hi,
3843 fstats->total_multicast_packets_transmitted_lo,
3844 qstats->total_multicast_packets_transmitted_lo);
3845 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3846 qstats->total_broadcast_packets_transmitted_hi,
3847 fstats->total_broadcast_packets_transmitted_lo,
3848 qstats->total_broadcast_packets_transmitted_lo);
3849 ADD_64(fstats->valid_bytes_received_hi,
3850 qstats->valid_bytes_received_hi,
3851 fstats->valid_bytes_received_lo,
3852 qstats->valid_bytes_received_lo);
3853
3854 ADD_64(estats->error_bytes_received_hi,
3855 qstats->error_bytes_received_hi,
3856 estats->error_bytes_received_lo,
3857 qstats->error_bytes_received_lo);
3858 ADD_64(estats->etherstatsoverrsizepkts_hi,
3859 qstats->etherstatsoverrsizepkts_hi,
3860 estats->etherstatsoverrsizepkts_lo,
3861 qstats->etherstatsoverrsizepkts_lo);
3862 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3863 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3864 }
3865
3866 ADD_64(fstats->total_bytes_received_hi,
3867 estats->rx_stat_ifhcinbadoctets_hi,
3868 fstats->total_bytes_received_lo,
3869 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
3870
3871 memcpy(estats, &(fstats->total_bytes_received_hi),
3872 sizeof(struct host_func_stats) - 2*sizeof(u32));
3873
de832a55
EG
3874 ADD_64(estats->etherstatsoverrsizepkts_hi,
3875 estats->rx_stat_dot3statsframestoolong_hi,
3876 estats->etherstatsoverrsizepkts_lo,
3877 estats->rx_stat_dot3statsframestoolong_lo);
3878 ADD_64(estats->error_bytes_received_hi,
3879 estats->rx_stat_ifhcinbadoctets_hi,
3880 estats->error_bytes_received_lo,
3881 estats->rx_stat_ifhcinbadoctets_lo);
3882
3883 if (bp->port.pmf) {
3884 estats->mac_filter_discard =
3885 le32_to_cpu(tport->mac_filter_discard);
3886 estats->xxoverflow_discard =
3887 le32_to_cpu(tport->xxoverflow_discard);
3888 estats->brb_truncate_discard =
bb2a0f7a 3889 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
3890 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3891 }
bb2a0f7a
YG
3892
3893 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3894
de832a55
EG
3895 bp->stats_pending = 0;
3896
a2fbb9ea
ET
3897 return 0;
3898}
3899
bb2a0f7a 3900static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3901{
bb2a0f7a 3902 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3903 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3904 int i;
a2fbb9ea
ET
3905
3906 nstats->rx_packets =
3907 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3908 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3909 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3910
3911 nstats->tx_packets =
3912 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3913 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3914 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3915
de832a55 3916 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3917
0e39e645 3918 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3919
de832a55
EG
3920 nstats->rx_dropped = estats->mac_discard;
3921 for_each_queue(bp, i)
3922 nstats->rx_dropped +=
3923 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3924
a2fbb9ea
ET
3925 nstats->tx_dropped = 0;
3926
3927 nstats->multicast =
de832a55 3928 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3929
bb2a0f7a 3930 nstats->collisions =
de832a55 3931 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
3932
3933 nstats->rx_length_errors =
de832a55
EG
3934 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3935 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3936 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3937 bnx2x_hilo(&estats->brb_truncate_hi);
3938 nstats->rx_crc_errors =
3939 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3940 nstats->rx_frame_errors =
3941 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3942 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
3943 nstats->rx_missed_errors = estats->xxoverflow_discard;
3944
3945 nstats->rx_errors = nstats->rx_length_errors +
3946 nstats->rx_over_errors +
3947 nstats->rx_crc_errors +
3948 nstats->rx_frame_errors +
0e39e645
ET
3949 nstats->rx_fifo_errors +
3950 nstats->rx_missed_errors;
a2fbb9ea 3951
bb2a0f7a 3952 nstats->tx_aborted_errors =
de832a55
EG
3953 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3954 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3955 nstats->tx_carrier_errors =
3956 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
3957 nstats->tx_fifo_errors = 0;
3958 nstats->tx_heartbeat_errors = 0;
3959 nstats->tx_window_errors = 0;
3960
3961 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
3962 nstats->tx_carrier_errors +
3963 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3964}
3965
3966static void bnx2x_drv_stats_update(struct bnx2x *bp)
3967{
3968 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3969 int i;
3970
3971 estats->driver_xoff = 0;
3972 estats->rx_err_discard_pkt = 0;
3973 estats->rx_skb_alloc_failed = 0;
3974 estats->hw_csum_err = 0;
3975 for_each_queue(bp, i) {
3976 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3977
3978 estats->driver_xoff += qstats->driver_xoff;
3979 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3980 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3981 estats->hw_csum_err += qstats->hw_csum_err;
3982 }
a2fbb9ea
ET
3983}
3984
bb2a0f7a 3985static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3986{
bb2a0f7a 3987 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3988
bb2a0f7a
YG
3989 if (*stats_comp != DMAE_COMP_VAL)
3990 return;
3991
3992 if (bp->port.pmf)
de832a55 3993 bnx2x_hw_stats_update(bp);
a2fbb9ea 3994
de832a55
EG
3995 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3996 BNX2X_ERR("storm stats were not updated for 3 times\n");
3997 bnx2x_panic();
3998 return;
a2fbb9ea
ET
3999 }
4000
de832a55
EG
4001 bnx2x_net_stats_update(bp);
4002 bnx2x_drv_stats_update(bp);
4003
a2fbb9ea 4004 if (bp->msglevel & NETIF_MSG_TIMER) {
de832a55
EG
4005 struct tstorm_per_client_stats *old_tclient =
4006 &bp->fp->old_tclient;
4007 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4008 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4009 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4010 int i;
a2fbb9ea
ET
4011
4012 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4013 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4014 " tx pkt (%lx)\n",
4015 bnx2x_tx_avail(bp->fp),
7a9b2557 4016 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4017 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4018 " rx pkt (%lx)\n",
7a9b2557
VZ
4019 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4020 bp->fp->rx_comp_cons),
4021 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4022 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4023 "brb truncate %u\n",
4024 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4025 qstats->driver_xoff,
4026 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4027 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4028 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4029 "mac_discard %u mac_filter_discard %u "
4030 "xxovrflow_discard %u brb_truncate_discard %u "
4031 "ttl0_discard %u\n",
4781bfad 4032 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4033 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4034 bnx2x_hilo(&qstats->no_buff_discard_hi),
4035 estats->mac_discard, estats->mac_filter_discard,
4036 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4037 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4038
4039 for_each_queue(bp, i) {
4040 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4041 bnx2x_fp(bp, i, tx_pkt),
4042 bnx2x_fp(bp, i, rx_pkt),
4043 bnx2x_fp(bp, i, rx_calls));
4044 }
4045 }
4046
bb2a0f7a
YG
4047 bnx2x_hw_stats_post(bp);
4048 bnx2x_storm_stats_post(bp);
4049}
a2fbb9ea 4050
bb2a0f7a
YG
4051static void bnx2x_port_stats_stop(struct bnx2x *bp)
4052{
4053 struct dmae_command *dmae;
4054 u32 opcode;
4055 int loader_idx = PMF_DMAE_C(bp);
4056 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4057
bb2a0f7a 4058 bp->executer_idx = 0;
a2fbb9ea 4059
bb2a0f7a
YG
4060 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4061 DMAE_CMD_C_ENABLE |
4062 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4063#ifdef __BIG_ENDIAN
bb2a0f7a 4064 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4065#else
bb2a0f7a 4066 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4067#endif
bb2a0f7a
YG
4068 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4069 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4070
4071 if (bp->port.port_stx) {
4072
4073 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4074 if (bp->func_stx)
4075 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4076 else
4077 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4078 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4079 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4080 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4081 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4082 dmae->len = sizeof(struct host_port_stats) >> 2;
4083 if (bp->func_stx) {
4084 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4085 dmae->comp_addr_hi = 0;
4086 dmae->comp_val = 1;
4087 } else {
4088 dmae->comp_addr_lo =
4089 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4090 dmae->comp_addr_hi =
4091 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4092 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4093
bb2a0f7a
YG
4094 *stats_comp = 0;
4095 }
a2fbb9ea
ET
4096 }
4097
bb2a0f7a
YG
4098 if (bp->func_stx) {
4099
4100 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4102 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4103 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4104 dmae->dst_addr_lo = bp->func_stx >> 2;
4105 dmae->dst_addr_hi = 0;
4106 dmae->len = sizeof(struct host_func_stats) >> 2;
4107 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4108 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4109 dmae->comp_val = DMAE_COMP_VAL;
4110
4111 *stats_comp = 0;
a2fbb9ea 4112 }
bb2a0f7a
YG
4113}
4114
4115static void bnx2x_stats_stop(struct bnx2x *bp)
4116{
4117 int update = 0;
4118
4119 bnx2x_stats_comp(bp);
4120
4121 if (bp->port.pmf)
4122 update = (bnx2x_hw_stats_update(bp) == 0);
4123
4124 update |= (bnx2x_storm_stats_update(bp) == 0);
4125
4126 if (update) {
4127 bnx2x_net_stats_update(bp);
a2fbb9ea 4128
bb2a0f7a
YG
4129 if (bp->port.pmf)
4130 bnx2x_port_stats_stop(bp);
4131
4132 bnx2x_hw_stats_post(bp);
4133 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4134 }
4135}
4136
bb2a0f7a
YG
4137static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4138{
4139}
4140
4141static const struct {
4142 void (*action)(struct bnx2x *bp);
4143 enum bnx2x_stats_state next_state;
4144} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4145/* state event */
4146{
4147/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4148/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4149/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4150/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4151},
4152{
4153/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4154/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4155/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4156/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4157}
4158};
4159
4160static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4161{
4162 enum bnx2x_stats_state state = bp->stats_state;
4163
4164 bnx2x_stats_stm[state][event].action(bp);
4165 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4166
4167 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4168 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4169 state, event, bp->stats_state);
4170}
4171
a2fbb9ea
ET
4172static void bnx2x_timer(unsigned long data)
4173{
4174 struct bnx2x *bp = (struct bnx2x *) data;
4175
4176 if (!netif_running(bp->dev))
4177 return;
4178
4179 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4180 goto timer_restart;
a2fbb9ea
ET
4181
4182 if (poll) {
4183 struct bnx2x_fastpath *fp = &bp->fp[0];
4184 int rc;
4185
7961f791 4186 bnx2x_tx_int(fp);
a2fbb9ea
ET
4187 rc = bnx2x_rx_int(fp, 1000);
4188 }
4189
34f80b04
EG
4190 if (!BP_NOMCP(bp)) {
4191 int func = BP_FUNC(bp);
a2fbb9ea
ET
4192 u32 drv_pulse;
4193 u32 mcp_pulse;
4194
4195 ++bp->fw_drv_pulse_wr_seq;
4196 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4197 /* TBD - add SYSTEM_TIME */
4198 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4199 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4200
34f80b04 4201 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4202 MCP_PULSE_SEQ_MASK);
4203 /* The delta between driver pulse and mcp response
4204 * should be 1 (before mcp response) or 0 (after mcp response)
4205 */
4206 if ((drv_pulse != mcp_pulse) &&
4207 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4208 /* someone lost a heartbeat... */
4209 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4210 drv_pulse, mcp_pulse);
4211 }
4212 }
4213
bb2a0f7a
YG
4214 if ((bp->state == BNX2X_STATE_OPEN) ||
4215 (bp->state == BNX2X_STATE_DISABLED))
4216 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4217
f1410647 4218timer_restart:
a2fbb9ea
ET
4219 mod_timer(&bp->timer, jiffies + bp->current_interval);
4220}
4221
4222/* end of Statistics */
4223
4224/* nic init */
4225
4226/*
4227 * nic init service functions
4228 */
4229
34f80b04 4230static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4231{
34f80b04
EG
4232 int port = BP_PORT(bp);
4233
490c3c9b 4234 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
34f80b04 4235 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4236 sizeof(struct ustorm_status_block)/4);
490c3c9b 4237 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
34f80b04 4238 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4239 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4240}
4241
5c862848
EG
4242static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4243 dma_addr_t mapping, int sb_id)
34f80b04
EG
4244{
4245 int port = BP_PORT(bp);
bb2a0f7a 4246 int func = BP_FUNC(bp);
a2fbb9ea 4247 int index;
34f80b04 4248 u64 section;
a2fbb9ea
ET
4249
4250 /* USTORM */
4251 section = ((u64)mapping) + offsetof(struct host_status_block,
4252 u_status_block);
34f80b04 4253 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4254
4255 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4256 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4257 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4258 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4259 U64_HI(section));
bb2a0f7a
YG
4260 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4261 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4262
4263 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4264 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4265 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4266
4267 /* CSTORM */
4268 section = ((u64)mapping) + offsetof(struct host_status_block,
4269 c_status_block);
34f80b04 4270 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4271
4272 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4273 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4274 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4275 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4276 U64_HI(section));
7a9b2557
VZ
4277 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4278 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4279
4280 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4281 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4282 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4283
4284 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4285}
4286
4287static void bnx2x_zero_def_sb(struct bnx2x *bp)
4288{
4289 int func = BP_FUNC(bp);
a2fbb9ea 4290
490c3c9b
EG
4291 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4292 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4293 sizeof(struct tstorm_def_status_block)/4);
4294 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
34f80b04
EG
4295 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4296 sizeof(struct ustorm_def_status_block)/4);
490c3c9b 4297 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
34f80b04
EG
4298 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4299 sizeof(struct cstorm_def_status_block)/4);
490c3c9b 4300 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
34f80b04
EG
4301 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4302 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4303}
4304
4305static void bnx2x_init_def_sb(struct bnx2x *bp,
4306 struct host_def_status_block *def_sb,
34f80b04 4307 dma_addr_t mapping, int sb_id)
a2fbb9ea 4308{
34f80b04
EG
4309 int port = BP_PORT(bp);
4310 int func = BP_FUNC(bp);
a2fbb9ea
ET
4311 int index, val, reg_offset;
4312 u64 section;
4313
4314 /* ATTN */
4315 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4316 atten_status_block);
34f80b04 4317 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4318
49d66772
ET
4319 bp->attn_state = 0;
4320
a2fbb9ea
ET
4321 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4322 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4323
34f80b04 4324 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4325 bp->attn_group[index].sig[0] = REG_RD(bp,
4326 reg_offset + 0x10*index);
4327 bp->attn_group[index].sig[1] = REG_RD(bp,
4328 reg_offset + 0x4 + 0x10*index);
4329 bp->attn_group[index].sig[2] = REG_RD(bp,
4330 reg_offset + 0x8 + 0x10*index);
4331 bp->attn_group[index].sig[3] = REG_RD(bp,
4332 reg_offset + 0xc + 0x10*index);
4333 }
4334
a2fbb9ea
ET
4335 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4336 HC_REG_ATTN_MSG0_ADDR_L);
4337
4338 REG_WR(bp, reg_offset, U64_LO(section));
4339 REG_WR(bp, reg_offset + 4, U64_HI(section));
4340
4341 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4342
4343 val = REG_RD(bp, reg_offset);
34f80b04 4344 val |= sb_id;
a2fbb9ea
ET
4345 REG_WR(bp, reg_offset, val);
4346
4347 /* USTORM */
4348 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4349 u_def_status_block);
34f80b04 4350 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4351
4352 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4353 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4354 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4355 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4356 U64_HI(section));
5c862848 4357 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4358 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4359
4360 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4361 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4362 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4363
4364 /* CSTORM */
4365 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4366 c_def_status_block);
34f80b04 4367 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4368
4369 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4370 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4371 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4372 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4373 U64_HI(section));
5c862848 4374 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4375 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4376
4377 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4378 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4379 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4380
4381 /* TSTORM */
4382 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4383 t_def_status_block);
34f80b04 4384 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4385
4386 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4387 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4388 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4389 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4390 U64_HI(section));
5c862848 4391 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4392 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4393
4394 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4395 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4396 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4397
4398 /* XSTORM */
4399 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4400 x_def_status_block);
34f80b04 4401 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4402
4403 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4404 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4405 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4406 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4407 U64_HI(section));
5c862848 4408 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4409 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4410
4411 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4412 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4413 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4414
bb2a0f7a 4415 bp->stats_pending = 0;
66e855f3 4416 bp->set_mac_pending = 0;
bb2a0f7a 4417
34f80b04 4418 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4419}
4420
4421static void bnx2x_update_coalesce(struct bnx2x *bp)
4422{
34f80b04 4423 int port = BP_PORT(bp);
a2fbb9ea
ET
4424 int i;
4425
4426 for_each_queue(bp, i) {
34f80b04 4427 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4428
4429 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4430 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4431 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4432 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4433 bp->rx_ticks/12);
a2fbb9ea 4434 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4435 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4436 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4437 (bp->rx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4438
4439 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4440 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4441 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4442 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4443 bp->tx_ticks/12);
a2fbb9ea 4444 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4445 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4446 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4447 (bp->tx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4448 }
4449}
4450
7a9b2557
VZ
4451static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4452 struct bnx2x_fastpath *fp, int last)
4453{
4454 int i;
4455
4456 for (i = 0; i < last; i++) {
4457 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4458 struct sk_buff *skb = rx_buf->skb;
4459
4460 if (skb == NULL) {
4461 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4462 continue;
4463 }
4464
4465 if (fp->tpa_state[i] == BNX2X_TPA_START)
4466 pci_unmap_single(bp->pdev,
4467 pci_unmap_addr(rx_buf, mapping),
356e2385 4468 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4469
4470 dev_kfree_skb(skb);
4471 rx_buf->skb = NULL;
4472 }
4473}
4474
a2fbb9ea
ET
4475static void bnx2x_init_rx_rings(struct bnx2x *bp)
4476{
7a9b2557 4477 int func = BP_FUNC(bp);
32626230
EG
4478 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4479 ETH_MAX_AGGREGATION_QUEUES_E1H;
4480 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4481 int i, j;
a2fbb9ea 4482
87942b46 4483 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4484 DP(NETIF_MSG_IFUP,
4485 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4486
7a9b2557 4487 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4488
555f6c78 4489 for_each_rx_queue(bp, j) {
32626230 4490 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4491
32626230 4492 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4493 fp->tpa_pool[i].skb =
4494 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4495 if (!fp->tpa_pool[i].skb) {
4496 BNX2X_ERR("Failed to allocate TPA "
4497 "skb pool for queue[%d] - "
4498 "disabling TPA on this "
4499 "queue!\n", j);
4500 bnx2x_free_tpa_pool(bp, fp, i);
4501 fp->disable_tpa = 1;
4502 break;
4503 }
4504 pci_unmap_addr_set((struct sw_rx_bd *)
4505 &bp->fp->tpa_pool[i],
4506 mapping, 0);
4507 fp->tpa_state[i] = BNX2X_TPA_STOP;
4508 }
4509 }
4510 }
4511
555f6c78 4512 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4513 struct bnx2x_fastpath *fp = &bp->fp[j];
4514
4515 fp->rx_bd_cons = 0;
4516 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4517 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4518
4519 /* "next page" elements initialization */
4520 /* SGE ring */
4521 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4522 struct eth_rx_sge *sge;
4523
4524 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4525 sge->addr_hi =
4526 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4527 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4528 sge->addr_lo =
4529 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4530 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4531 }
4532
4533 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4534
7a9b2557 4535 /* RX BD ring */
a2fbb9ea
ET
4536 for (i = 1; i <= NUM_RX_RINGS; i++) {
4537 struct eth_rx_bd *rx_bd;
4538
4539 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4540 rx_bd->addr_hi =
4541 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4542 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4543 rx_bd->addr_lo =
4544 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4545 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4546 }
4547
34f80b04 4548 /* CQ ring */
a2fbb9ea
ET
4549 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4550 struct eth_rx_cqe_next_page *nextpg;
4551
4552 nextpg = (struct eth_rx_cqe_next_page *)
4553 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4554 nextpg->addr_hi =
4555 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4556 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4557 nextpg->addr_lo =
4558 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4559 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4560 }
4561
7a9b2557
VZ
4562 /* Allocate SGEs and initialize the ring elements */
4563 for (i = 0, ring_prod = 0;
4564 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4565
7a9b2557
VZ
4566 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4567 BNX2X_ERR("was only able to allocate "
4568 "%d rx sges\n", i);
4569 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4570 /* Cleanup already allocated elements */
4571 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4572 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4573 fp->disable_tpa = 1;
4574 ring_prod = 0;
4575 break;
4576 }
4577 ring_prod = NEXT_SGE_IDX(ring_prod);
4578 }
4579 fp->rx_sge_prod = ring_prod;
4580
4581 /* Allocate BDs and initialize BD ring */
66e855f3 4582 fp->rx_comp_cons = 0;
7a9b2557 4583 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4584 for (i = 0; i < bp->rx_ring_size; i++) {
4585 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4586 BNX2X_ERR("was only able to allocate "
de832a55
EG
4587 "%d rx skbs on queue[%d]\n", i, j);
4588 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4589 break;
4590 }
4591 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4592 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4593 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4594 }
4595
7a9b2557
VZ
4596 fp->rx_bd_prod = ring_prod;
4597 /* must not have more available CQEs than BDs */
4598 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4599 cqe_ring_prod);
a2fbb9ea
ET
4600 fp->rx_pkt = fp->rx_calls = 0;
4601
7a9b2557
VZ
4602 /* Warning!
4603 * this will generate an interrupt (to the TSTORM)
4604 * must only be done after chip is initialized
4605 */
4606 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4607 fp->rx_sge_prod);
a2fbb9ea
ET
4608 if (j != 0)
4609 continue;
4610
4611 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4612 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4613 U64_LO(fp->rx_comp_mapping));
4614 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4615 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4616 U64_HI(fp->rx_comp_mapping));
4617 }
4618}
4619
4620static void bnx2x_init_tx_ring(struct bnx2x *bp)
4621{
4622 int i, j;
4623
555f6c78 4624 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
4625 struct bnx2x_fastpath *fp = &bp->fp[j];
4626
4627 for (i = 1; i <= NUM_TX_RINGS; i++) {
4628 struct eth_tx_bd *tx_bd =
4629 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4630
4631 tx_bd->addr_hi =
4632 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4633 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4634 tx_bd->addr_lo =
4635 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4636 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4637 }
4638
4639 fp->tx_pkt_prod = 0;
4640 fp->tx_pkt_cons = 0;
4641 fp->tx_bd_prod = 0;
4642 fp->tx_bd_cons = 0;
4643 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4644 fp->tx_pkt = 0;
4645 }
4646}
4647
4648static void bnx2x_init_sp_ring(struct bnx2x *bp)
4649{
34f80b04 4650 int func = BP_FUNC(bp);
a2fbb9ea
ET
4651
4652 spin_lock_init(&bp->spq_lock);
4653
4654 bp->spq_left = MAX_SPQ_PENDING;
4655 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4656 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4657 bp->spq_prod_bd = bp->spq;
4658 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4659
34f80b04 4660 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4661 U64_LO(bp->spq_mapping));
34f80b04
EG
4662 REG_WR(bp,
4663 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4664 U64_HI(bp->spq_mapping));
4665
34f80b04 4666 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4667 bp->spq_prod_idx);
4668}
4669
4670static void bnx2x_init_context(struct bnx2x *bp)
4671{
4672 int i;
4673
4674 for_each_queue(bp, i) {
4675 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4676 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4677 u8 cl_id = fp->cl_id;
0626b899 4678 u8 sb_id = fp->sb_id;
a2fbb9ea 4679
34f80b04
EG
4680 context->ustorm_st_context.common.sb_index_numbers =
4681 BNX2X_RX_SB_INDEX_NUM;
0626b899 4682 context->ustorm_st_context.common.clientId = cl_id;
34f80b04
EG
4683 context->ustorm_st_context.common.status_block_id = sb_id;
4684 context->ustorm_st_context.common.flags =
de832a55
EG
4685 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4686 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4687 context->ustorm_st_context.common.statistics_counter_id =
4688 cl_id;
8d9c5f34 4689 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4690 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4691 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4692 bp->rx_buf_size;
34f80b04 4693 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4694 U64_HI(fp->rx_desc_mapping);
34f80b04 4695 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4696 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4697 if (!fp->disable_tpa) {
4698 context->ustorm_st_context.common.flags |=
4699 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4700 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4701 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4702 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4703 (u32)0xffff);
7a9b2557
VZ
4704 context->ustorm_st_context.common.sge_page_base_hi =
4705 U64_HI(fp->rx_sge_mapping);
4706 context->ustorm_st_context.common.sge_page_base_lo =
4707 U64_LO(fp->rx_sge_mapping);
4708 }
4709
8d9c5f34
EG
4710 context->ustorm_ag_context.cdu_usage =
4711 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4712 CDU_REGION_NUMBER_UCM_AG,
4713 ETH_CONNECTION_TYPE);
4714
4715 context->xstorm_st_context.tx_bd_page_base_hi =
4716 U64_HI(fp->tx_desc_mapping);
4717 context->xstorm_st_context.tx_bd_page_base_lo =
4718 U64_LO(fp->tx_desc_mapping);
4719 context->xstorm_st_context.db_data_addr_hi =
4720 U64_HI(fp->tx_prods_mapping);
4721 context->xstorm_st_context.db_data_addr_lo =
4722 U64_LO(fp->tx_prods_mapping);
0626b899 4723 context->xstorm_st_context.statistics_data = (cl_id |
8d9c5f34 4724 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4725 context->cstorm_st_context.sb_index_number =
5c862848 4726 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4727 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4728
4729 context->xstorm_ag_context.cdu_reserved =
4730 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4731 CDU_REGION_NUMBER_XCM_AG,
4732 ETH_CONNECTION_TYPE);
a2fbb9ea
ET
4733 }
4734}
4735
4736static void bnx2x_init_ind_table(struct bnx2x *bp)
4737{
26c8fa4d 4738 int func = BP_FUNC(bp);
a2fbb9ea
ET
4739 int i;
4740
555f6c78 4741 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4742 return;
4743
555f6c78
EG
4744 DP(NETIF_MSG_IFUP,
4745 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4746 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4747 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4748 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 4749 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
4750}
4751
49d66772
ET
4752static void bnx2x_set_client_config(struct bnx2x *bp)
4753{
49d66772 4754 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4755 int port = BP_PORT(bp);
4756 int i;
49d66772 4757
e7799c5f 4758 tstorm_client.mtu = bp->dev->mtu;
49d66772 4759 tstorm_client.config_flags =
de832a55
EG
4760 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4761 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4762#ifdef BCM_VLAN
0c6671b0 4763 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4764 tstorm_client.config_flags |=
8d9c5f34 4765 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
4766 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4767 }
4768#endif
49d66772 4769
7a9b2557
VZ
4770 if (bp->flags & TPA_ENABLE_FLAG) {
4771 tstorm_client.max_sges_for_packet =
4f40f2cb 4772 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4773 tstorm_client.max_sges_for_packet =
4774 ((tstorm_client.max_sges_for_packet +
4775 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4776 PAGES_PER_SGE_SHIFT;
4777
4778 tstorm_client.config_flags |=
4779 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4780 }
4781
49d66772 4782 for_each_queue(bp, i) {
de832a55
EG
4783 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4784
49d66772 4785 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4786 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4787 ((u32 *)&tstorm_client)[0]);
4788 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4789 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4790 ((u32 *)&tstorm_client)[1]);
4791 }
4792
34f80b04
EG
4793 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4794 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4795}
4796
a2fbb9ea
ET
4797static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4798{
a2fbb9ea 4799 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4800 int mode = bp->rx_mode;
4801 int mask = (1 << BP_L_ID(bp));
4802 int func = BP_FUNC(bp);
a2fbb9ea
ET
4803 int i;
4804
3196a88a 4805 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4806
4807 switch (mode) {
4808 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4809 tstorm_mac_filter.ucast_drop_all = mask;
4810 tstorm_mac_filter.mcast_drop_all = mask;
4811 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 4812 break;
356e2385 4813
a2fbb9ea 4814 case BNX2X_RX_MODE_NORMAL:
34f80b04 4815 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4816 break;
356e2385 4817
a2fbb9ea 4818 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4819 tstorm_mac_filter.mcast_accept_all = mask;
4820 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4821 break;
356e2385 4822
a2fbb9ea 4823 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4824 tstorm_mac_filter.ucast_accept_all = mask;
4825 tstorm_mac_filter.mcast_accept_all = mask;
4826 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 4827 break;
356e2385 4828
a2fbb9ea 4829 default:
34f80b04
EG
4830 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4831 break;
a2fbb9ea
ET
4832 }
4833
4834 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4835 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4836 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4837 ((u32 *)&tstorm_mac_filter)[i]);
4838
34f80b04 4839/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4840 ((u32 *)&tstorm_mac_filter)[i]); */
4841 }
a2fbb9ea 4842
49d66772
ET
4843 if (mode != BNX2X_RX_MODE_NONE)
4844 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4845}
4846
471de716
EG
4847static void bnx2x_init_internal_common(struct bnx2x *bp)
4848{
4849 int i;
4850
3cdf1db7
YG
4851 if (bp->flags & TPA_ENABLE_FLAG) {
4852 struct tstorm_eth_tpa_exist tpa = {0};
4853
4854 tpa.tpa_exist = 1;
4855
4856 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4857 ((u32 *)&tpa)[0]);
4858 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4859 ((u32 *)&tpa)[1]);
4860 }
4861
471de716
EG
4862 /* Zero this manually as its initialization is
4863 currently missing in the initTool */
4864 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4865 REG_WR(bp, BAR_USTRORM_INTMEM +
4866 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4867}
4868
4869static void bnx2x_init_internal_port(struct bnx2x *bp)
4870{
4871 int port = BP_PORT(bp);
4872
4873 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4874 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4875 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4876 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4877}
4878
8a1c38d1
EG
4879/* Calculates the sum of vn_min_rates.
4880 It's needed for further normalizing of the min_rates.
4881 Returns:
4882 sum of vn_min_rates.
4883 or
4884 0 - if all the min_rates are 0.
4885 In the later case fainess algorithm should be deactivated.
4886 If not all min_rates are zero then those that are zeroes will be set to 1.
4887 */
4888static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4889{
4890 int all_zero = 1;
4891 int port = BP_PORT(bp);
4892 int vn;
4893
4894 bp->vn_weight_sum = 0;
4895 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4896 int func = 2*vn + port;
4897 u32 vn_cfg =
4898 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4899 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4900 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4901
4902 /* Skip hidden vns */
4903 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4904 continue;
4905
4906 /* If min rate is zero - set it to 1 */
4907 if (!vn_min_rate)
4908 vn_min_rate = DEF_MIN_RATE;
4909 else
4910 all_zero = 0;
4911
4912 bp->vn_weight_sum += vn_min_rate;
4913 }
4914
4915 /* ... only if all min rates are zeros - disable fairness */
4916 if (all_zero)
4917 bp->vn_weight_sum = 0;
4918}
4919
471de716 4920static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4921{
a2fbb9ea
ET
4922 struct tstorm_eth_function_common_config tstorm_config = {0};
4923 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4924 int port = BP_PORT(bp);
4925 int func = BP_FUNC(bp);
de832a55
EG
4926 int i, j;
4927 u32 offset;
471de716 4928 u16 max_agg_size;
a2fbb9ea
ET
4929
4930 if (is_multi(bp)) {
555f6c78 4931 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4932 tstorm_config.rss_result_mask = MULTI_MASK;
4933 }
8d9c5f34
EG
4934 if (IS_E1HMF(bp))
4935 tstorm_config.config_flags |=
4936 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4937
34f80b04
EG
4938 tstorm_config.leading_client_id = BP_L_ID(bp);
4939
4940 REG_WR(bp, BAR_TSTRORM_INTMEM +
4941 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4942 (*(u32 *)&tstorm_config));
4943
4944 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4945 bnx2x_set_storm_rx_mode(bp);
4946
4947 for_each_queue(bp, i) {
4948 u8 cl_id = bp->fp[i].cl_id;
4949
4950 /* reset xstorm per client statistics */
4951 offset = BAR_XSTRORM_INTMEM +
4952 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4953 for (j = 0;
4954 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4955 REG_WR(bp, offset + j*4, 0);
4956
4957 /* reset tstorm per client statistics */
4958 offset = BAR_TSTRORM_INTMEM +
4959 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4960 for (j = 0;
4961 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4962 REG_WR(bp, offset + j*4, 0);
4963
4964 /* reset ustorm per client statistics */
4965 offset = BAR_USTRORM_INTMEM +
4966 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4967 for (j = 0;
4968 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4969 REG_WR(bp, offset + j*4, 0);
4970 }
4971
4972 /* Init statistics related context */
4973 stats_flags.collect_eth = 1;
4974
4975 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4976 ((u32 *)&stats_flags)[0]);
4977 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4978 ((u32 *)&stats_flags)[1]);
4979
4980 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4981 ((u32 *)&stats_flags)[0]);
4982 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4983 ((u32 *)&stats_flags)[1]);
4984
4985 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4986 ((u32 *)&stats_flags)[0]);
4987 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4988 ((u32 *)&stats_flags)[1]);
4989
4990 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4991 ((u32 *)&stats_flags)[0]);
4992 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4993 ((u32 *)&stats_flags)[1]);
4994
4995 REG_WR(bp, BAR_XSTRORM_INTMEM +
4996 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4997 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4998 REG_WR(bp, BAR_XSTRORM_INTMEM +
4999 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5000 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5001
5002 REG_WR(bp, BAR_TSTRORM_INTMEM +
5003 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5004 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5005 REG_WR(bp, BAR_TSTRORM_INTMEM +
5006 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5007 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5008
5009 REG_WR(bp, BAR_USTRORM_INTMEM +
5010 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5011 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5012 REG_WR(bp, BAR_USTRORM_INTMEM +
5013 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5014 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5015
5016 if (CHIP_IS_E1H(bp)) {
5017 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5018 IS_E1HMF(bp));
5019 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5020 IS_E1HMF(bp));
5021 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5022 IS_E1HMF(bp));
5023 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5024 IS_E1HMF(bp));
5025
5026 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5027 bp->e1hov);
5028 }
5029
5030 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5031 max_agg_size =
5032 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5033 SGE_PAGE_SIZE * PAGES_PER_SGE),
5034 (u32)0xffff);
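/* Example (assuming 4K SGE pages and PAGES_PER_SGE == 2): 8 frags *
 * 4096 * 2 = 64K exceeds the u16 range, so max_agg_size is clamped to
 * 0xffff before being written to the per-client USTORM field below. */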
5035 for_each_rx_queue(bp, i) {
5036 struct bnx2x_fastpath *fp = &bp->fp[i];
5037
5038 REG_WR(bp, BAR_USTRORM_INTMEM +
5039 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5040 U64_LO(fp->rx_comp_mapping));
5041 REG_WR(bp, BAR_USTRORM_INTMEM +
5042 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5043 U64_HI(fp->rx_comp_mapping));
5044
5045 REG_WR16(bp, BAR_USTRORM_INTMEM +
5046 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5047 max_agg_size);
5048 }
5049
5050 /* dropless flow control */
5051 if (CHIP_IS_E1H(bp)) {
5052 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5053
5054 rx_pause.bd_thr_low = 250;
5055 rx_pause.cqe_thr_low = 250;
5056 rx_pause.cos = 1;
5057 rx_pause.sge_thr_low = 0;
5058 rx_pause.bd_thr_high = 350;
5059 rx_pause.cqe_thr_high = 350;
5060 rx_pause.sge_thr_high = 0;
5061
5062 for_each_rx_queue(bp, i) {
5063 struct bnx2x_fastpath *fp = &bp->fp[i];
5064
5065 if (!fp->disable_tpa) {
5066 rx_pause.sge_thr_low = 150;
5067 rx_pause.sge_thr_high = 250;
5068 }
5069
5070
5071 offset = BAR_USTRORM_INTMEM +
5072 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5073 fp->cl_id);
5074 for (j = 0;
5075 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5076 j++)
5077 REG_WR(bp, offset + j*4,
5078 ((u32 *)&rx_pause)[j]);
5079 }
5080 }
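/* Driver-side reading of the thresholds (the exact semantics live in the
 * E1H ustorm firmware): the *_thr_low values appear to arm pause
 * generation when free BDs/CQEs (and SGEs when TPA is active on the
 * queue) drop below them, and the *_thr_high values release it. */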
5081
5082 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5083
5084 /* Init rate shaping and fairness contexts */
5085 if (IS_E1HMF(bp)) {
5086 int vn;
5087
5088 /* During init there is no active link;
5089 until link is up, set link rate to 10Gbps */
5090 bp->link_vars.line_speed = SPEED_10000;
5091 bnx2x_init_port_minmax(bp);
5092
5093 bnx2x_calc_vn_weight_sum(bp);
5094
5095 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5096 bnx2x_init_vn_minmax(bp, 2*vn + port);
5097
5098 /* Enable rate shaping and fairness */
5099 bp->cmng.flags.cmng_enables =
5100 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5101 if (bp->vn_weight_sum)
5102 bp->cmng.flags.cmng_enables |=
5103 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5104 else
5105 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5106 " fairness will be disabled\n");
5107 } else {
5108 /* rate shaping and fairness are disabled */
5109 DP(NETIF_MSG_IFUP,
5110 "single function mode minmax will be disabled\n");
5111 }
5112
5113
5114 /* Store it to internal memory */
5115 if (bp->port.pmf)
5116 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5117 REG_WR(bp, BAR_XSTRORM_INTMEM +
5118 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5119 ((u32 *)(&bp->cmng))[i]);
5120}
5121
5122static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5123{
5124 switch (load_code) {
5125 case FW_MSG_CODE_DRV_LOAD_COMMON:
5126 bnx2x_init_internal_common(bp);
5127 /* no break */
5128
5129 case FW_MSG_CODE_DRV_LOAD_PORT:
5130 bnx2x_init_internal_port(bp);
5131 /* no break */
5132
5133 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5134 bnx2x_init_internal_func(bp);
5135 break;
5136
5137 default:
5138 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5139 break;
5140 }
5141}
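/* Note: the fall-throughs above are intentional - a COMMON load also
 * performs the port and function init, and a PORT load also performs the
 * function init, mirroring the hierarchy of the MCP load responses. */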
5142
5143static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5144{
5145 int i;
5146
5147 for_each_queue(bp, i) {
5148 struct bnx2x_fastpath *fp = &bp->fp[i];
5149
5150 fp->bp = bp;
5151 fp->state = BNX2X_FP_STATE_CLOSED;
5152 fp->index = i;
5153 fp->cl_id = BP_L_ID(bp) + i;
5154 fp->sb_id = fp->cl_id;
5155 DP(NETIF_MSG_IFUP,
5156 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5157 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5158 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5159 fp->sb_id);
5160 bnx2x_update_fpsb_idx(fp);
5161 }
5162
5163 /* ensure status block indices were read */
5164 rmb();
5165
5166
5167 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5168 DEF_SB_ID);
5169 bnx2x_update_dsb_idx(bp);
5170 bnx2x_update_coalesce(bp);
5171 bnx2x_init_rx_rings(bp);
5172 bnx2x_init_tx_ring(bp);
5173 bnx2x_init_sp_ring(bp);
5174 bnx2x_init_context(bp);
5175 bnx2x_init_internal(bp, load_code);
5176 bnx2x_init_ind_table(bp);
5177 bnx2x_stats_init(bp);
5178
5179 /* At this point, we are ready for interrupts */
5180 atomic_set(&bp->intr_sem, 0);
5181
5182 /* flush all before enabling interrupts */
5183 mb();
5184 mmiowb();
5185
5186 bnx2x_int_enable(bp);
5187}
5188
5189/* end of nic init */
5190
5191/*
5192 * gzip service functions
5193 */
5194
5195static int bnx2x_gunzip_init(struct bnx2x *bp)
5196{
5197 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5198 &bp->gunzip_mapping);
5199 if (bp->gunzip_buf == NULL)
5200 goto gunzip_nomem1;
5201
5202 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5203 if (bp->strm == NULL)
5204 goto gunzip_nomem2;
5205
5206 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5207 GFP_KERNEL);
5208 if (bp->strm->workspace == NULL)
5209 goto gunzip_nomem3;
5210
5211 return 0;
5212
5213gunzip_nomem3:
5214 kfree(bp->strm);
5215 bp->strm = NULL;
5216
5217gunzip_nomem2:
5218 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5219 bp->gunzip_mapping);
5220 bp->gunzip_buf = NULL;
5221
5222gunzip_nomem1:
5223 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5224 " decompression\n", bp->dev->name);
5225 return -ENOMEM;
5226}
5227
5228static void bnx2x_gunzip_end(struct bnx2x *bp)
5229{
5230 kfree(bp->strm->workspace);
5231
5232 kfree(bp->strm);
5233 bp->strm = NULL;
5234
5235 if (bp->gunzip_buf) {
5236 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5237 bp->gunzip_mapping);
5238 bp->gunzip_buf = NULL;
5239 }
5240}
5241
5242static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5243{
5244 int n, rc;
5245
5246 /* check gzip header */
5247 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5248 BNX2X_ERR("Bad gzip header\n");
5249 return -EINVAL;
5250 }
5251
5252 n = 10;
5253
5254#define FNAME 0x8
5255
5256 if (zbuf[3] & FNAME)
5257 while ((zbuf[n++] != 0) && (n < len));
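/* Per RFC 1952 the fixed gzip header is 10 bytes (magic 0x1f 0x8b,
 * method, flags, mtime, XFL, OS); when the FNAME flag (bit 3) is set, a
 * NUL-terminated original file name follows and is skipped here. */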
5258
5259 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5260 bp->strm->avail_in = len - n;
5261 bp->strm->next_out = bp->gunzip_buf;
5262 bp->strm->avail_out = FW_BUF_SIZE;
5263
5264 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5265 if (rc != Z_OK)
5266 return rc;
5267
5268 rc = zlib_inflate(bp->strm, Z_FINISH);
5269 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5270 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5271 bp->dev->name, bp->strm->msg);
5272
5273 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5274 if (bp->gunzip_outlen & 0x3)
5275 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5276 " gunzip_outlen (%d) not aligned\n",
5277 bp->dev->name, bp->gunzip_outlen);
5278 bp->gunzip_outlen >>= 2;
5279
5280 zlib_inflateEnd(bp->strm);
5281
5282 if (rc == Z_STREAM_END)
5283 return 0;
5284
5285 return rc;
5286}
5287
5288/* nic load/unload */
5289
5290/*
5291 * General service functions
5292 */
5293
5294/* send a NIG loopback debug packet */
5295static void bnx2x_lb_pckt(struct bnx2x *bp)
5296{
5297 u32 wb_write[3];
5298
5299 /* Ethernet source and destination addresses */
5300 wb_write[0] = 0x55555555;
5301 wb_write[1] = 0x55555555;
5302 wb_write[2] = 0x20; /* SOP */
5303 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5304
5305 /* NON-IP protocol */
5306 wb_write[0] = 0x09000000;
5307 wb_write[1] = 0x55555555;
5308 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5309 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5310}
5311
5312/* some of the internal memories
5313 * are not directly readable by the driver,
5314 * so to test them we send debug packets
5315 */
5316static int bnx2x_int_mem_test(struct bnx2x *bp)
5317{
5318 int factor;
5319 int count, i;
5320 u32 val = 0;
5321
5322 if (CHIP_REV_IS_FPGA(bp))
5323 factor = 120;
5324 else if (CHIP_REV_IS_EMUL(bp))
5325 factor = 200;
5326 else
5327 factor = 1;
5328
5329 DP(NETIF_MSG_HW, "start part1\n");
5330
5331 /* Disable inputs of parser neighbor blocks */
5332 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5333 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5334 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5335 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5336
5337 /* Write 0 to parser credits for CFC search request */
5338 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5339
5340 /* send Ethernet packet */
5341 bnx2x_lb_pckt(bp);
5342
5343 /* TODO do i reset NIG statistic? */
5344 /* Wait until NIG register shows 1 packet of size 0x10 */
5345 count = 1000 * factor;
5346 while (count) {
5347
5348 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5349 val = *bnx2x_sp(bp, wb_data[0]);
5350 if (val == 0x10)
5351 break;
5352
5353 msleep(10);
5354 count--;
5355 }
5356 if (val != 0x10) {
5357 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5358 return -1;
5359 }
5360
5361 /* Wait until PRS register shows 1 packet */
5362 count = 1000 * factor;
5363 while (count) {
5364 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5365 if (val == 1)
5366 break;
5367
5368 msleep(10);
5369 count--;
5370 }
5371 if (val != 0x1) {
5372 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5373 return -2;
5374 }
5375
5376 /* Reset and init BRB, PRS */
5377 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5378 msleep(50);
5379 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5380 msleep(50);
5381 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5382 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5383
5384 DP(NETIF_MSG_HW, "part2\n");
5385
5386 /* Disable inputs of parser neighbor blocks */
5387 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5388 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5389 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5390 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5391
5392 /* Write 0 to parser credits for CFC search request */
5393 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5394
5395 /* send 10 Ethernet packets */
5396 for (i = 0; i < 10; i++)
5397 bnx2x_lb_pckt(bp);
5398
5399 /* Wait until NIG register shows 10 + 1
5400 packets of size 11*0x10 = 0xb0 */
5401 count = 1000 * factor;
5402 while (count) {
5403
5404 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5405 val = *bnx2x_sp(bp, wb_data[0]);
5406 if (val == 0xb0)
5407 break;
5408
5409 msleep(10);
5410 count--;
5411 }
5412 if (val != 0xb0) {
5413 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5414 return -3;
5415 }
5416
5417 /* Wait until PRS register shows 2 packets */
5418 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5419 if (val != 2)
5420 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5421
5422 /* Write 1 to parser credits for CFC search request */
5423 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5424
5425 /* Wait until PRS register shows 3 packets */
5426 msleep(10 * factor);
5427 /* Wait until NIG register shows 1 packet of size 0x10 */
5428 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5429 if (val != 3)
5430 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5431
5432 /* clear NIG EOP FIFO */
5433 for (i = 0; i < 11; i++)
5434 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5435 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5436 if (val != 1) {
5437 BNX2X_ERR("clear of NIG failed\n");
5438 return -4;
5439 }
5440
5441 /* Reset and init BRB, PRS, NIG */
5442 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5443 msleep(50);
5444 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5445 msleep(50);
5446 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5447 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5448#ifndef BCM_ISCSI
5449 /* set NIC mode */
5450 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5451#endif
5452
5453 /* Enable inputs of parser neighbor blocks */
5454 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5455 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5456 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5457 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5458
5459 DP(NETIF_MSG_HW, "done\n");
5460
5461 return 0; /* OK */
5462}
5463
5464static void enable_blocks_attention(struct bnx2x *bp)
5465{
5466 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5467 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5468 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5469 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5470 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5471 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5472 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5473 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5474 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5475/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5476/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5477 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5478 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5479 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5480/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5481/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5482 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5483 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5484 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5485 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5486/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5487/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5488 if (CHIP_REV_IS_FPGA(bp))
5489 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5490 else
5491 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5492 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5493 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5494 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5495/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5496/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5497 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5498 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5499/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5500 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5501}
5502
5503
5504static void bnx2x_reset_common(struct bnx2x *bp)
5505{
5506 /* reset_common */
5507 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5508 0xd3ffff7f);
5509 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5510}
5511
5512static int bnx2x_init_common(struct bnx2x *bp)
5513{
5514 u32 val, i;
5515
5516 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5517
5518 bnx2x_reset_common(bp);
5519 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5520 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5521
5522 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5523 if (CHIP_IS_E1H(bp))
5524 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5525
5526 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5527 msleep(30);
5528 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5529
5530 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5531 if (CHIP_IS_E1(bp)) {
5532 /* enable HW interrupt from PXP on USDM overflow
5533 bit 16 on INT_MASK_0 */
5534 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5535 }
5536
5537 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5538 bnx2x_init_pxp(bp);
5539
5540#ifdef __BIG_ENDIAN
5541 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5542 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5543 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5544 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5545 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5546 /* make sure this value is 0 */
5547 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5548
5549/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5550 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5551 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5552 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5553 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5554#endif
5555
5556 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5557#ifdef BCM_ISCSI
5558 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5559 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5560 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5561#endif
5562
5563 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5564 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5565
5566 /* let the HW do its magic ... */
5567 msleep(100);
5568 /* finish PXP init */
5569 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5570 if (val != 1) {
5571 BNX2X_ERR("PXP2 CFG failed\n");
5572 return -EBUSY;
5573 }
5574 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5575 if (val != 1) {
5576 BNX2X_ERR("PXP2 RD_INIT failed\n");
5577 return -EBUSY;
5578 }
5579
5580 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5581 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5582
5583 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5584
5585 /* clean the DMAE memory */
5586 bp->dmae_ready = 1;
5587 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5588
5589 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5590 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5591 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5592 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5593
5594 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5595 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5596 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5597 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5598
5599 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5600 /* soft reset pulse */
5601 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5602 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5603
5604#ifdef BCM_ISCSI
5605 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5606#endif
5607
5608 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5609 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5610 if (!CHIP_REV_IS_SLOW(bp)) {
5611 /* enable hw interrupt from doorbell Q */
5612 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5613 }
5614
5615 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5616 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5617 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5618 /* set NIC mode */
5619 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5620 if (CHIP_IS_E1H(bp))
5621 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5622
5623 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5624 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5625 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5626 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5627
5628 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5629 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5630 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5631 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5632
5633 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5634 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5635 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5636 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5637
5638 /* sync semi rtc */
5639 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5640 0x80000000);
5641 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5642 0x80000000);
5643
5644 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5645 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5646 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5647
5648 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5649 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5650 REG_WR(bp, i, 0xc0cac01a);
5651 /* TODO: replace with something meaningful */
5652 }
5653 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5654 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5655
5656 if (sizeof(union cdu_context) != 1024)
5657 /* we currently assume that a context is 1024 bytes */
5658 printk(KERN_ALERT PFX "please adjust the size of"
5659 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5660
5661 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5662 val = (4 << 24) + (0 << 12) + 1024;
5663 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5664 if (CHIP_IS_E1(bp)) {
5665 /* !!! fix pxp client credit until excel update */
5666 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5667 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5668 }
5669
5670 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5671 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5672 /* enable context validation interrupt from CFC */
5673 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5674
5675 /* set the thresholds to prevent CFC/CDU race */
5676 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5677
5678 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5679 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5680
5681 /* PXPCS COMMON comes here */
5682 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5683 /* Reset PCIE errors for debug */
5684 REG_WR(bp, 0x2814, 0xffffffff);
5685 REG_WR(bp, 0x3820, 0xffffffff);
5686
5687 /* EMAC0 COMMON comes here */
5688 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5689 /* EMAC1 COMMON comes here */
5690 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5691 /* DBU COMMON comes here */
5692 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5693 /* DBG COMMON comes here */
5694 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5695
5696 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5697 if (CHIP_IS_E1H(bp)) {
5698 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5699 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5700 }
5701
5702 if (CHIP_REV_IS_SLOW(bp))
5703 msleep(200);
5704
5705 /* finish CFC init */
5706 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5707 if (val != 1) {
5708 BNX2X_ERR("CFC LL_INIT failed\n");
5709 return -EBUSY;
5710 }
5711 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5712 if (val != 1) {
5713 BNX2X_ERR("CFC AC_INIT failed\n");
5714 return -EBUSY;
5715 }
5716 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5717 if (val != 1) {
5718 BNX2X_ERR("CFC CAM_INIT failed\n");
5719 return -EBUSY;
5720 }
5721 REG_WR(bp, CFC_REG_DEBUG0, 0);
5722
5723 /* read NIG statistic
5724 to see if this is our first up since powerup */
5725 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5726 val = *bnx2x_sp(bp, wb_data[0]);
5727
5728 /* do internal memory self test */
5729 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5730 BNX2X_ERR("internal mem self test failed\n");
5731 return -EBUSY;
5732 }
5733
5734 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5735 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5736 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5738 bp->port.need_hw_lock = 1;
5739 break;
5740
5741 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5742 /* Fan failure is indicated by SPIO 5 */
5743 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5744 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5745
5746 /* set to active low mode */
5747 val = REG_RD(bp, MISC_REG_SPIO_INT);
5748 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5749 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5750 REG_WR(bp, MISC_REG_SPIO_INT, val);
5751
5752 /* enable interrupt to signal the IGU */
5753 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5754 val |= (1 << MISC_REGISTERS_SPIO_5);
5755 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5756 break;
5757
5758 default:
5759 break;
5760 }
5761
5762 /* clear PXP2 attentions */
5763 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5764
5765 enable_blocks_attention(bp);
5766
5767 if (!BP_NOMCP(bp)) {
5768 bnx2x_acquire_phy_lock(bp);
5769 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5770 bnx2x_release_phy_lock(bp);
5771 } else
5772 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5773
5774 return 0;
5775}
5776
5777static int bnx2x_init_port(struct bnx2x *bp)
5778{
5779 int port = BP_PORT(bp);
5780 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5781 u32 low, high;
5782 u32 val;
5783
5784 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5785
5786 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5787
5788 /* Port PXP comes here */
5789 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5790 /* Port PXP2 comes here */
5791 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5792#ifdef BCM_ISCSI
5793 /* Port0 1
5794 * Port1 385 */
5795 i++;
5796 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5797 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5798 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5799 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5800
5801 /* Port0 2
5802 * Port1 386 */
5803 i++;
5804 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5805 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5806 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5807 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5808
5809 /* Port0 3
5810 * Port1 387 */
5811 i++;
5812 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5813 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5814 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5815 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5816#endif
5817 /* Port CMs come here */
5818 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5819
5820 /* Port QM comes here */
5821#ifdef BCM_ISCSI
5822 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5823 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5824
5825 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5826#endif
5827 /* Port DQ comes here */
5828 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5829
5830 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5831 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5832 /* no pause for emulation and FPGA */
5833 low = 0;
5834 high = 513;
5835 } else {
5836 if (IS_E1HMF(bp))
5837 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5838 else if (bp->dev->mtu > 4096) {
5839 if (bp->flags & ONE_PORT_FLAG)
5840 low = 160;
5841 else {
5842 val = bp->dev->mtu;
5843 /* (24*1024 + val*4)/256 */
5844 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5845 }
5846 } else
5847 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5848 high = low + 56; /* 14*1024/256 */
5849 }
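/* Worked example: a 9000-byte MTU on a two-port (non-MF) device yields
 * low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293, both counted
 * in 256-byte BRB blocks per the (24*1024 + val*4)/256 formula above. */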
5850 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5851 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5852
5853
5854 /* Port PRS comes here */
5855 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5856 /* Port TSDM comes here */
5857 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5858 /* Port CSDM comes here */
5859 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5860 /* Port USDM comes here */
5861 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5862 /* Port XSDM comes here */
5863 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5864
5865 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5866 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5867 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5868 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5869
5870 /* Port UPB comes here */
5871 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5872 /* Port XPB comes here */
5873 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5874
5875 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5876
5877 /* configure PBF to work without PAUSE mtu 9000 */
5878 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5879
5880 /* update threshold */
5881 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5882 /* update init credit */
5883 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5884
5885 /* probe changes */
5886 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5887 msleep(5);
5888 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5889
5890#ifdef BCM_ISCSI
5891 /* tell the searcher where the T2 table is */
5892 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5893
5894 wb_write[0] = U64_LO(bp->t2_mapping);
5895 wb_write[1] = U64_HI(bp->t2_mapping);
5896 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5897 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5898 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5899 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5900
5901 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5902 /* Port SRCH comes here */
5903#endif
5904 /* Port CDU comes here */
5905 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5906 /* Port CFC comes here */
5907 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5908
5909 if (CHIP_IS_E1(bp)) {
5910 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5911 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5912 }
5913 bnx2x_init_block(bp, HC_BLOCK, init_stage);
5914
5915 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
5916 /* init aeu_mask_attn_func_0/1:
5917 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5918 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5919 * bits 4-7 are used for "per vn group attention" */
5920 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5921 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5922
5923 /* Port PXPCS comes here */
5924 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5925 /* Port EMAC0 comes here */
5926 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
5927 /* Port EMAC1 comes here */
5928 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
5929 /* Port DBU comes here */
5930 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
5931 /* Port DBG comes here */
5932 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
5933
5934 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
5935
5936 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5937
5938 if (CHIP_IS_E1H(bp)) {
5939 /* 0x2 disable e1hov, 0x1 enable */
5940 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5941 (IS_E1HMF(bp) ? 0x1 : 0x2));
5942
5943 /* support pause requests from USDM, TSDM and BRB */
5944 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5945
5946 {
5947 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5948 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5949 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5950 }
5951 }
5952
5953 /* Port MCP comes here */
5954 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
5955 /* Port DMAE comes here */
5956 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
5957
5958 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5959 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5960 {
5961 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5962
5963 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5964 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5965
5966 /* The GPIO should be swapped if the swap register is
5967 set and active */
5968 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5969 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5970
5971 /* Select function upon port-swap configuration */
5972 if (port == 0) {
5973 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5974 aeu_gpio_mask = (swap_val && swap_override) ?
5975 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5976 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5977 } else {
5978 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5979 aeu_gpio_mask = (swap_val && swap_override) ?
5980 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5981 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5982 }
5983 val = REG_RD(bp, offset);
5984 /* add GPIO3 to group */
5985 val |= aeu_gpio_mask;
5986 REG_WR(bp, offset, val);
5987 }
5988 break;
5989
5990 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5991 /* add SPIO 5 to group 0 */
5992 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5993 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5994 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5995 break;
5996
5997 default:
5998 break;
5999 }
6000
6001 bnx2x__link_reset(bp);
6002
6003 return 0;
6004}
6005
6006#define ILT_PER_FUNC (768/2)
6007#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6008/* the phys address is shifted right 12 bits and has a
6009 1=valid bit added at bit 52 (the 53rd bit);
6010 then, since this is a wide register(TM),
6011 we split it into two 32 bit writes
6012 */
6013#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6014#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6015#define PXP_ONE_ILT(x) (((x) << 10) | x)
6016#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
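/* Illustrative example: for physical address 0x123456000, ONCHIP_ADDR1
 * yields 0x00123456 (the address shifted right 12 bits) and ONCHIP_ADDR2
 * yields 0x00100000 (only the valid bit, since bits 63:44 are zero);
 * together they form the ILT entry written as two 32-bit words. */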
6017
6018#define CNIC_ILT_LINES 0
6019
6020static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6021{
6022 int reg;
6023
6024 if (CHIP_IS_E1H(bp))
6025 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6026 else /* E1 */
6027 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6028
6029 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6030}
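/* Each ILT entry is two 32-bit words (8 bytes), hence the index*8 stride
 * into the on-chip address table. */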
6031
6032static int bnx2x_init_func(struct bnx2x *bp)
6033{
6034 int port = BP_PORT(bp);
6035 int func = BP_FUNC(bp);
6036 u32 addr, val;
6037 int i;
6038
6039 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6040
6041 /* set MSI reconfigure capability */
6042 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6043 val = REG_RD(bp, addr);
6044 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6045 REG_WR(bp, addr, val);
6046
6047 i = FUNC_ILT_BASE(func);
6048
6049 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6050 if (CHIP_IS_E1H(bp)) {
6051 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6052 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6053 } else /* E1 */
6054 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6055 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6056
6057
6058 if (CHIP_IS_E1H(bp)) {
6059 for (i = 0; i < 9; i++)
6060 bnx2x_init_block(bp,
6061 cm_blocks[i], FUNC0_STAGE + func);
6062
6063 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6064 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6065 }
6066
6067 /* HC init per function */
6068 if (CHIP_IS_E1H(bp)) {
6069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6070
6071 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6072 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6073 }
6074 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6075
6076 /* Reset PCIE errors for debug */
6077 REG_WR(bp, 0x2114, 0xffffffff);
6078 REG_WR(bp, 0x2120, 0xffffffff);
6079
6080 return 0;
6081}
6082
6083static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6084{
6085 int i, rc = 0;
6086
6087 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6088 BP_FUNC(bp), load_code);
6089
6090 bp->dmae_ready = 0;
6091 mutex_init(&bp->dmae_mutex);
6092 bnx2x_gunzip_init(bp);
6093
6094 switch (load_code) {
6095 case FW_MSG_CODE_DRV_LOAD_COMMON:
6096 rc = bnx2x_init_common(bp);
6097 if (rc)
6098 goto init_hw_err;
6099 /* no break */
6100
6101 case FW_MSG_CODE_DRV_LOAD_PORT:
6102 bp->dmae_ready = 1;
6103 rc = bnx2x_init_port(bp);
6104 if (rc)
6105 goto init_hw_err;
6106 /* no break */
6107
6108 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6109 bp->dmae_ready = 1;
6110 rc = bnx2x_init_func(bp);
6111 if (rc)
6112 goto init_hw_err;
6113 break;
6114
6115 default:
6116 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6117 break;
6118 }
6119
6120 if (!BP_NOMCP(bp)) {
6121 int func = BP_FUNC(bp);
6122
6123 bp->fw_drv_pulse_wr_seq =
6124 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6125 DRV_PULSE_SEQ_MASK);
6126 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6127 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6128 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6129 } else
6130 bp->func_stx = 0;
6131
6132 /* this needs to be done before gunzip end */
6133 bnx2x_zero_def_sb(bp);
6134 for_each_queue(bp, i)
6135 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6136
6137init_hw_err:
6138 bnx2x_gunzip_end(bp);
6139
6140 return rc;
6141}
6142
6143/* send the MCP a request, block until there is a reply */
6144static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6145{
6146 int func = BP_FUNC(bp);
6147 u32 seq = ++bp->fw_seq;
6148 u32 rc = 0;
6149 u32 cnt = 1;
6150 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6151
6152 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6153 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6154
6155 do {
6156 /* let the FW do its magic ... */
6157 msleep(delay);
6158
6159 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6160
6161 /* Give the FW up to 2 seconds (200*10ms) */
6162 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6163
6164 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6165 cnt*delay, rc, seq);
6166
6167 /* is this a reply to our command? */
6168 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6169 rc &= FW_MSG_CODE_MASK;
6170
6171 } else {
6172 /* FW BUG! */
6173 BNX2X_ERR("FW failed to respond!\n");
6174 bnx2x_fw_dump(bp);
6175 rc = 0;
6176 }
6177
6178 return rc;
6179}
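/* Note: the FW echoes the sequence number in the low bits of its reply
 * (FW_MSG_SEQ_NUMBER_MASK); if our seq has not appeared within the ~2s
 * budget above, the MCP is assumed dead, its trace is dumped and 0 is
 * returned to the caller. */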
6180
6181static void bnx2x_free_mem(struct bnx2x *bp)
6182{
6183
6184#define BNX2X_PCI_FREE(x, y, size) \
6185 do { \
6186 if (x) { \
6187 pci_free_consistent(bp->pdev, size, x, y); \
6188 x = NULL; \
6189 y = 0; \
6190 } \
6191 } while (0)
6192
6193#define BNX2X_FREE(x) \
6194 do { \
6195 if (x) { \
6196 vfree(x); \
6197 x = NULL; \
6198 } \
6199 } while (0)
6200
6201 int i;
6202
6203 /* fastpath */
6204 /* Common */
6205 for_each_queue(bp, i) {
6206
6207 /* status blocks */
6208 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6209 bnx2x_fp(bp, i, status_blk_mapping),
6210 sizeof(struct host_status_block) +
6211 sizeof(struct eth_tx_db_data));
6212 }
6213 /* Rx */
6214 for_each_rx_queue(bp, i) {
6215
6216 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6217 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6218 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6219 bnx2x_fp(bp, i, rx_desc_mapping),
6220 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6221
6222 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6223 bnx2x_fp(bp, i, rx_comp_mapping),
6224 sizeof(struct eth_fast_path_rx_cqe) *
6225 NUM_RCQ_BD);
6226
6227 /* SGE ring */
6228 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6229 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6230 bnx2x_fp(bp, i, rx_sge_mapping),
6231 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6232 }
6233 /* Tx */
6234 for_each_tx_queue(bp, i) {
6235
6236 /* fastpath tx rings: tx_buf tx_desc */
6237 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6238 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6239 bnx2x_fp(bp, i, tx_desc_mapping),
6240 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6241 }
6242 /* end of fastpath */
6243
6244 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6245 sizeof(struct host_def_status_block));
6246
6247 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6248 sizeof(struct bnx2x_slowpath));
6249
6250#ifdef BCM_ISCSI
6251 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6252 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6253 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6254 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6255#endif
6256 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6257
6258#undef BNX2X_PCI_FREE
6259#undef BNX2X_FREE
6260}
6261
6262static int bnx2x_alloc_mem(struct bnx2x *bp)
6263{
6264
6265#define BNX2X_PCI_ALLOC(x, y, size) \
6266 do { \
6267 x = pci_alloc_consistent(bp->pdev, size, y); \
6268 if (x == NULL) \
6269 goto alloc_mem_err; \
6270 memset(x, 0, size); \
6271 } while (0)
6272
6273#define BNX2X_ALLOC(x, size) \
6274 do { \
6275 x = vmalloc(size); \
6276 if (x == NULL) \
6277 goto alloc_mem_err; \
6278 memset(x, 0, size); \
6279 } while (0)
6280
6281 int i;
6282
6283 /* fastpath */
6284 /* Common */
6285 for_each_queue(bp, i) {
6286 bnx2x_fp(bp, i, bp) = bp;
6287
6288 /* status blocks */
6289 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6290 &bnx2x_fp(bp, i, status_blk_mapping),
6291 sizeof(struct host_status_block) +
6292 sizeof(struct eth_tx_db_data));
6293 }
6294 /* Rx */
6295 for_each_rx_queue(bp, i) {
6296
6297 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6298 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6299 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6300 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6301 &bnx2x_fp(bp, i, rx_desc_mapping),
6302 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6303
6304 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6305 &bnx2x_fp(bp, i, rx_comp_mapping),
6306 sizeof(struct eth_fast_path_rx_cqe) *
6307 NUM_RCQ_BD);
6308
6309 /* SGE ring */
6310 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6311 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6312 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6313 &bnx2x_fp(bp, i, rx_sge_mapping),
6314 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6315 }
6316 /* Tx */
6317 for_each_tx_queue(bp, i) {
6318
6319 bnx2x_fp(bp, i, hw_tx_prods) =
6320 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6321
6322 bnx2x_fp(bp, i, tx_prods_mapping) =
6323 bnx2x_fp(bp, i, status_blk_mapping) +
6324 sizeof(struct host_status_block);
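/* the tx producers block shares the status block's DMA allocation: it
 * sits immediately after the host_status_block in the same buffer, so
 * its bus address is the status block mapping plus that struct's size */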
6325
6326 /* fastpath tx rings: tx_buf tx_desc */
6327 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6328 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6329 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6330 &bnx2x_fp(bp, i, tx_desc_mapping),
6331 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6332 }
6333 /* end of fastpath */
6334
6335 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6336 sizeof(struct host_def_status_block));
6337
6338 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6339 sizeof(struct bnx2x_slowpath));
6340
6341#ifdef BCM_ISCSI
6342 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6343
6344 /* Initialize T1 */
6345 for (i = 0; i < 64*1024; i += 64) {
6346 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6347 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6348 }
6349
6350 /* allocate searcher T2 table
6351 we allocate 1/4 of alloc num for T2
6352 (which is not entered into the ILT) */
6353 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6354
6355 /* Initialize T2 */
6356 for (i = 0; i < 16*1024; i += 64)
6357 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6358
6359 /* now fixup the last line in the block to point to the next block */
6360 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6361
6362 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6363 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6364
6365 /* QM queues (128*MAX_CONN) */
6366 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6367#endif
6368
6369 /* Slow path ring */
6370 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6371
6372 return 0;
6373
6374alloc_mem_err:
6375 bnx2x_free_mem(bp);
6376 return -ENOMEM;
6377
6378#undef BNX2X_PCI_ALLOC
6379#undef BNX2X_ALLOC
6380}
6381
6382static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6383{
6384 int i;
6385
6386 for_each_tx_queue(bp, i) {
6387 struct bnx2x_fastpath *fp = &bp->fp[i];
6388
6389 u16 bd_cons = fp->tx_bd_cons;
6390 u16 sw_prod = fp->tx_pkt_prod;
6391 u16 sw_cons = fp->tx_pkt_cons;
6392
6393 while (sw_cons != sw_prod) {
6394 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6395 sw_cons++;
6396 }
6397 }
6398}
6399
6400static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6401{
6402 int i, j;
6403
6404 for_each_rx_queue(bp, j) {
6405 struct bnx2x_fastpath *fp = &bp->fp[j];
6406
6407 for (i = 0; i < NUM_RX_BD; i++) {
6408 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6409 struct sk_buff *skb = rx_buf->skb;
6410
6411 if (skb == NULL)
6412 continue;
6413
6414 pci_unmap_single(bp->pdev,
6415 pci_unmap_addr(rx_buf, mapping),
6416 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6417
6418 rx_buf->skb = NULL;
6419 dev_kfree_skb(skb);
6420 }
6421 if (!fp->disable_tpa)
6422 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6423 ETH_MAX_AGGREGATION_QUEUES_E1 :
6424 ETH_MAX_AGGREGATION_QUEUES_E1H);
6425 }
6426}
6427
6428static void bnx2x_free_skbs(struct bnx2x *bp)
6429{
6430 bnx2x_free_tx_skbs(bp);
6431 bnx2x_free_rx_skbs(bp);
6432}
6433
6434static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6435{
6436 int i, offset = 1;
6437
6438 free_irq(bp->msix_table[0].vector, bp->dev);
6439 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6440 bp->msix_table[0].vector);
6441
6442 for_each_queue(bp, i) {
6443 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6444 "state %x\n", i, bp->msix_table[i + offset].vector,
6445 bnx2x_fp(bp, i, state));
6446
6447 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6448 }
6449}
6450
6451static void bnx2x_free_irq(struct bnx2x *bp)
6452{
6453 if (bp->flags & USING_MSIX_FLAG) {
6454 bnx2x_free_msix_irqs(bp);
6455 pci_disable_msix(bp->pdev);
6456 bp->flags &= ~USING_MSIX_FLAG;
6457
6458 } else if (bp->flags & USING_MSI_FLAG) {
6459 free_irq(bp->pdev->irq, bp->dev);
6460 pci_disable_msi(bp->pdev);
6461 bp->flags &= ~USING_MSI_FLAG;
6462
6463 } else
6464 free_irq(bp->pdev->irq, bp->dev);
6465}
6466
6467static int bnx2x_enable_msix(struct bnx2x *bp)
6468{
6469 int i, rc, offset = 1;
6470 int igu_vec = 0;
6471
6472 bp->msix_table[0].entry = igu_vec;
6473 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6474
6475 for_each_queue(bp, i) {
6476 igu_vec = BP_L_ID(bp) + offset + i;
6477 bp->msix_table[i + offset].entry = igu_vec;
6478 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6479 "(fastpath #%u)\n", i + offset, igu_vec, i);
6480 }
6481
6482 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6483 BNX2X_NUM_QUEUES(bp) + offset);
6484 if (rc) {
6485 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6486 return rc;
6487 }
6488
6489 bp->flags |= USING_MSIX_FLAG;
6490
6491 return 0;
6492}
6493
6494static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6495{
6496 int i, rc, offset = 1;
6497
6498 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6499 bp->dev->name, bp->dev);
6500 if (rc) {
6501 BNX2X_ERR("request sp irq failed\n");
6502 return -EBUSY;
6503 }
6504
6505 for_each_queue(bp, i) {
6506 struct bnx2x_fastpath *fp = &bp->fp[i];
6507
6508 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6509 rc = request_irq(bp->msix_table[i + offset].vector,
6510 bnx2x_msix_fp_int, 0, fp->name, fp);
6511 if (rc) {
6512 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6513 bnx2x_free_msix_irqs(bp);
6514 return -EBUSY;
6515 }
6516
6517 fp->state = BNX2X_FP_STATE_IRQ;
6518 }
6519
6520 i = BNX2X_NUM_QUEUES(bp);
6521 if (is_multi(bp))
6522 printk(KERN_INFO PFX
6523 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6524 bp->dev->name, bp->msix_table[0].vector,
6525 bp->msix_table[offset].vector,
6526 bp->msix_table[offset + i - 1].vector);
6527 else
6528 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6529 bp->dev->name, bp->msix_table[0].vector,
6530 bp->msix_table[offset + i - 1].vector);
6531
6532 return 0;
6533}
6534
6535static int bnx2x_enable_msi(struct bnx2x *bp)
6536{
6537 int rc;
6538
6539 rc = pci_enable_msi(bp->pdev);
6540 if (rc) {
6541 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6542 return -1;
6543 }
6544 bp->flags |= USING_MSI_FLAG;
6545
6546 return 0;
6547}
6548
6549static int bnx2x_req_irq(struct bnx2x *bp)
6550{
6551 unsigned long flags;
6552 int rc;
6553
6554 if (bp->flags & USING_MSI_FLAG)
6555 flags = 0;
6556 else
6557 flags = IRQF_SHARED;
6558
6559 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6560 bp->dev->name, bp->dev);
6561 if (!rc)
6562 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6563
6564 return rc;
6565}
6566
6567static void bnx2x_napi_enable(struct bnx2x *bp)
6568{
6569 int i;
6570
6571 for_each_rx_queue(bp, i)
6572 napi_enable(&bnx2x_fp(bp, i, napi));
6573}
6574
6575static void bnx2x_napi_disable(struct bnx2x *bp)
6576{
6577 int i;
6578
6579 for_each_rx_queue(bp, i)
6580 napi_disable(&bnx2x_fp(bp, i, napi));
6581}
6582
6583static void bnx2x_netif_start(struct bnx2x *bp)
6584{
6585 if (atomic_dec_and_test(&bp->intr_sem)) {
6586 if (netif_running(bp->dev)) {
6587 bnx2x_napi_enable(bp);
6588 bnx2x_int_enable(bp);
6589 if (bp->state == BNX2X_STATE_OPEN)
6590 netif_tx_wake_all_queues(bp->dev);
6591 }
6592 }
6593}
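/* Note: bnx2x_netif_start() re-enables NAPI and interrupts only once the
 * interrupt semaphore drops back to zero, pairing with the
 * bnx2x_int_disable_sync() call in bnx2x_netif_stop() below. */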
6594
6595static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6596{
6597 bnx2x_int_disable_sync(bp, disable_hw);
6598 bnx2x_napi_disable(bp);
6599 netif_tx_disable(bp->dev);
6600 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6601}
6602
6603/*
6604 * Init service functions
6605 */
6606
6607static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6608{
6609 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6610 int port = BP_PORT(bp);
6611
6612 /* CAM allocation
6613 * unicasts 0-31:port0 32-63:port1
6614 * multicast 64-127:port0 128-191:port1
6615 */
6616 config->hdr.length = 2;
6617 config->hdr.offset = port ? 32 : 0;
6618 config->hdr.client_id = bp->fp->cl_id;
6619 config->hdr.reserved1 = 0;
6620
6621 /* primary MAC */
6622 config->config_table[0].cam_entry.msb_mac_addr =
6623 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6624 config->config_table[0].cam_entry.middle_mac_addr =
6625 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6626 config->config_table[0].cam_entry.lsb_mac_addr =
6627 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6628 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6629 if (set)
6630 config->config_table[0].target_table_entry.flags = 0;
6631 else
6632 CAM_INVALIDATE(config->config_table[0]);
6633 config->config_table[0].target_table_entry.client_id = 0;
6634 config->config_table[0].target_table_entry.vlan_id = 0;
6635
6636 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6637 (set ? "setting" : "clearing"),
6638 config->config_table[0].cam_entry.msb_mac_addr,
6639 config->config_table[0].cam_entry.middle_mac_addr,
6640 config->config_table[0].cam_entry.lsb_mac_addr);
6641
6642 /* broadcast */
6643 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6644 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6645 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6646 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6647 if (set)
6648 config->config_table[1].target_table_entry.flags =
6649 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6650 else
6651 CAM_INVALIDATE(config->config_table[1]);
6652 config->config_table[1].target_table_entry.client_id = 0;
6653 config->config_table[1].target_table_entry.vlan_id = 0;
6654
6655 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6656 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6657 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6658}
6659
3101c2bc 6660static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6661{
6662 struct mac_configuration_cmd_e1h *config =
6663 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6664
3101c2bc 6665 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04
EG
6666 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6667 return;
6668 }
6669
6670 /* CAM allocation for E1H
6671 * unicasts: by func number
6672 * multicast: 20+FUNC*20, 20 each
6673 */
8d9c5f34 6674 config->hdr.length = 1;
34f80b04 6675 config->hdr.offset = BP_FUNC(bp);
0626b899 6676 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
6677 config->hdr.reserved1 = 0;
6678
6679 /* primary MAC */
6680 config->config_table[0].msb_mac_addr =
6681 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6682 config->config_table[0].middle_mac_addr =
6683 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6684 config->config_table[0].lsb_mac_addr =
6685 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6686 config->config_table[0].client_id = BP_L_ID(bp);
6687 config->config_table[0].vlan_id = 0;
6688 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6689 if (set)
6690 config->config_table[0].flags = BP_PORT(bp);
6691 else
6692 config->config_table[0].flags =
6693 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6694
3101c2bc
YG
6695 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6696 (set ? "setting" : "clearing"),
34f80b04
EG
6697 config->config_table[0].msb_mac_addr,
6698 config->config_table[0].middle_mac_addr,
6699 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6700
6701 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6702 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6703 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6704}
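
/* To summarize the two CAM layouts programmed above: E1 carves the CAM up
 * by port (unicasts 0-31/32-63, multicasts 64-127/128-191) and clears an
 * entry with CAM_INVALIDATE(), while E1H gives each function one unicast
 * entry indexed by BP_FUNC() plus a 20-entry multicast window at
 * 20+FUNC*20, and clears by writing the E1H action-type flag instead.
 */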

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
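
/* Ramrods complete asynchronously: bnx2x_sp_event() updates the state
 * variable from the slowpath completion, and bnx2x_wait_ramrod() simply
 * spins on it (polling the rx ring itself when interrupts are not being
 * serviced).  A typical caller therefore looks like:
 *
 *	fp->state = BNX2X_FP_STATE_OPENING;
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
 *		      fp->cl_id, 0);
 *	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
 *			       &fp->state, 0);
 */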

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   " number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}
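
/* In short: int_mode can force INT#x or MSI (single queue); otherwise
 * MSI-X is attempted with one rx/tx queue pair per online CPU (capped at
 * BNX2X_MAX_QUEUES(bp)), and if bnx2x_enable_msix() fails the driver
 * falls back to a single fastpath served by MSI or legacy INT#x.
 */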

static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
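
/* The error labels above unwind bnx2x_nic_load() in reverse setup order
 * and fall through into each other: load_error3 backs out the MCP load
 * request and frees SKBs/SGEs, load_error2 releases the IRQs, and
 * load_error1 tears down NAPI and the queue memory.
 */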

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
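
/* The MCP's unload response thus dictates the reset scope: UNLOAD_COMMON
 * (typically the last driver instance going down) resets port, function
 * and the common blocks; UNLOAD_PORT resets port and function; and
 * UNLOAD_FUNCTION touches only the per-function IGU edges and ILT entries.
 */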

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
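
/* A note on the reset_code negotiation above: UNLOAD_NORMAL requests
 * WOL_DIS, NO_WOL_FLAG devices request WOL_MCP (presumably leaving the
 * MCP to keep management traffic alive), and when WoL is enabled the
 * unicast MAC is first written into EMAC match entries 1-4 (entry 0 is
 * reserved for the PMF) before requesting WOL_EN.
 */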

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
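
/* The "pretend" mechanism used above: writing 0 into this function's PGL
 * pretend register makes the chip treat subsequent GRC accesses as if
 * they came from function 0, which is what lets an E1H function disable
 * the HC in the "like-E1" mode before the original value is restored.
 */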

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
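
/* bnx2x_undi_unload() covers the case where a pre-boot UNDI (PXE) driver
 * left the chip initialized: DORQ_REG_NORM_CID_OFST == 0x7 serves as the
 * UNDI fingerprint, both ports are unloaded through the MCP, input
 * traffic is shut off at the NIG, and the chip is reset while the NIG
 * port-swap straps are preserved across the reset.
 */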

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
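
/* Worked example of the chip_id composition above (values illustrative):
 * a part reporting chip num 0x164e, rev 0, metal 0 and bond_id 0 yields
 *
 *	id = (0x164e << 16) | (0x0 << 12) | (0x00 << 4) | 0x0 = 0x164e0000
 *
 * matching the "chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3" layout.
 */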

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
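
/* In short: the PORT_FEATURE_LINK_SPEED_* field of link_config is mapped
 * to a (req_line_speed, req_duplex, advertising) triple, with every choice
 * validated against bp->port.supported.  An unsupported NVRAM selection
 * logs an error and keeps the previous settings; an unknown value falls
 * back to autoneg advertising everything supported.
 */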

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
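
/* Defaults established above, for reference: TPA/LRO enabled unless
 * disable_tpa is set, maximal rx/tx ring sizes, rx checksumming on,
 * coalescing ticks of 50 (tx) and 25 (rx), and a 1s status timer (5s on
 * slow emulation runs, or the "poll" interval when polling is forced).
 */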

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
8313
8314static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8315{
8316 struct bnx2x *bp = netdev_priv(dev);
8317 u32 advertising;
8318
34f80b04
EG
8319 if (IS_E1HMF(bp))
8320 return 0;
8321
a2fbb9ea
ET
8322 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8323 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8324 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8325 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8326 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8327 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8328 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8329
a2fbb9ea 8330 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8331 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8332 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8333 return -EINVAL;
f1410647 8334 }
a2fbb9ea
ET
8335
8336 /* advertise the requested speed and duplex if supported */
34f80b04 8337 cmd->advertising &= bp->port.supported;
a2fbb9ea 8338
c18487ee
YR
8339 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8340 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8341 bp->port.advertising |= (ADVERTISED_Autoneg |
8342 cmd->advertising);
a2fbb9ea
ET
8343
8344 } else { /* forced speed */
8345 /* advertise the requested speed and duplex if supported */
8346 switch (cmd->speed) {
8347 case SPEED_10:
8348 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8349 if (!(bp->port.supported &
f1410647
ET
8350 SUPPORTED_10baseT_Full)) {
8351 DP(NETIF_MSG_LINK,
8352 "10M full not supported\n");
a2fbb9ea 8353 return -EINVAL;
f1410647 8354 }
8355
8356 advertising = (ADVERTISED_10baseT_Full |
8357 ADVERTISED_TP);
8358 } else {
34f80b04 8359 if (!(bp->port.supported &
8360 SUPPORTED_10baseT_Half)) {
8361 DP(NETIF_MSG_LINK,
8362 "10M half not supported\n");
a2fbb9ea 8363 return -EINVAL;
f1410647 8364 }
8365
8366 advertising = (ADVERTISED_10baseT_Half |
8367 ADVERTISED_TP);
8368 }
8369 break;
8370
8371 case SPEED_100:
8372 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8373 if (!(bp->port.supported &
8374 SUPPORTED_100baseT_Full)) {
8375 DP(NETIF_MSG_LINK,
8376 "100M full not supported\n");
a2fbb9ea 8377 return -EINVAL;
f1410647 8378 }
8379
8380 advertising = (ADVERTISED_100baseT_Full |
8381 ADVERTISED_TP);
8382 } else {
34f80b04 8383 if (!(bp->port.supported &
8384 SUPPORTED_100baseT_Half)) {
8385 DP(NETIF_MSG_LINK,
8386 "100M half not supported\n");
a2fbb9ea 8387 return -EINVAL;
f1410647 8388 }
8389
8390 advertising = (ADVERTISED_100baseT_Half |
8391 ADVERTISED_TP);
8392 }
8393 break;
8394
8395 case SPEED_1000:
8396 if (cmd->duplex != DUPLEX_FULL) {
8397 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8398 return -EINVAL;
f1410647 8399 }
a2fbb9ea 8400
34f80b04 8401 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8402 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8403 return -EINVAL;
f1410647 8404 }
8405
8406 advertising = (ADVERTISED_1000baseT_Full |
8407 ADVERTISED_TP);
8408 break;
8409
8410 case SPEED_2500:
8411 if (cmd->duplex != DUPLEX_FULL) {
8412 DP(NETIF_MSG_LINK,
8413 "2.5G half not supported\n");
a2fbb9ea 8414 return -EINVAL;
f1410647 8415 }
a2fbb9ea 8416
34f80b04 8417 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8418 DP(NETIF_MSG_LINK,
8419 "2.5G full not supported\n");
a2fbb9ea 8420 return -EINVAL;
f1410647 8421 }
a2fbb9ea 8422
f1410647 8423 advertising = (ADVERTISED_2500baseX_Full |
8424 ADVERTISED_TP);
8425 break;
8426
8427 case SPEED_10000:
8428 if (cmd->duplex != DUPLEX_FULL) {
8429 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8430 return -EINVAL;
f1410647 8431 }
a2fbb9ea 8432
34f80b04 8433 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8434 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8435 return -EINVAL;
f1410647 8436 }
8437
8438 advertising = (ADVERTISED_10000baseT_Full |
8439 ADVERTISED_FIBRE);
8440 break;
8441
8442 default:
f1410647 8443 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8444 return -EINVAL;
8445 }
8446
8447 bp->link_params.req_line_speed = cmd->speed;
8448 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8449 bp->port.advertising = advertising;
8450 }
8451
c18487ee 8452 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8453 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8454 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8455 bp->port.advertising);
a2fbb9ea 8456
34f80b04 8457 if (netif_running(dev)) {
bb2a0f7a 8458 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8459 bnx2x_link_set(bp);
8460 }
8461
8462 return 0;
8463}
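/* Each forced-speed case above repeats one shape: test the matching
 * SUPPORTED_* bit, then build the ADVERTISED_* mask. A hedged,
 * table-driven sketch of the same check; the bit values below are
 * illustrative stand-ins, not the real ethtool constants: */
struct ex_speed_caps {
	unsigned int speed;		/* Mbps, 0 terminates the table */
	unsigned int supported;		/* SUPPORTED_* bit (stand-in) */
	unsigned int advertising;	/* ADVERTISED_* mask (stand-in) */
};

static const struct ex_speed_caps ex_caps_tbl[] = {
	{ 1000,  0x1, 0x1 | 0x10 },	/* 1G full + TP */
	{ 10000, 0x2, 0x2 | 0x20 },	/* 10G full + FIBRE */
	{ 0, 0, 0 }
};

/* returns the advertising mask, or 0 when the speed is unsupported */
static unsigned int ex_advertising_for(unsigned int port_supported,
				       unsigned int speed)
{
	const struct ex_speed_caps *c;

	for (c = ex_caps_tbl; c->speed; c++)
		if (c->speed == speed)
			return (port_supported & c->supported) ?
				c->advertising : 0;
	return 0;
}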
8464
8465#define PHY_FW_VER_LEN 10
8466
8467static void bnx2x_get_drvinfo(struct net_device *dev,
8468 struct ethtool_drvinfo *info)
8469{
8470 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8471 u8 phy_fw_ver[PHY_FW_VER_LEN];
8472
8473 strcpy(info->driver, DRV_MODULE_NAME);
8474 strcpy(info->version, DRV_MODULE_VERSION);
8475
8476 phy_fw_ver[0] = '\0';
34f80b04 8477 if (bp->port.pmf) {
4a37fb66 8478 bnx2x_acquire_phy_lock(bp);
8479 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8480 (bp->state != BNX2X_STATE_CLOSED),
8481 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8482 bnx2x_release_phy_lock(bp);
34f80b04 8483 }
c18487ee 8484
8485 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8486 (bp->common.bc_ver & 0xff0000) >> 16,
8487 (bp->common.bc_ver & 0xff00) >> 8,
8488 (bp->common.bc_ver & 0xff),
8489 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8490 strcpy(info->bus_info, pci_name(bp->pdev));
8491 info->n_stats = BNX2X_NUM_STATS;
8492 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8493 info->eedump_len = bp->common.flash_size;
8494 info->regdump_len = 0;
8495}
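/* The fw_version string above packs the bootcode revision into the low
 * three bytes of bc_ver (major.minor.patch). A small sketch of that
 * decoding with an arbitrary example value: */
#include <stdio.h>

static void ex_format_bc_ver(unsigned int bc_ver, char *buf, size_t len)
{
	snprintf(buf, len, "BC:%d.%d.%d",
		 (bc_ver & 0xff0000) >> 16,
		 (bc_ver & 0xff00) >> 8,
		 (bc_ver & 0xff));
}

int main(void)
{
	char buf[16];

	ex_format_bc_ver(0x050207, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints BC:5.2.7 */
	return 0;
}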
8496
8497#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8498#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8499
8500static int bnx2x_get_regs_len(struct net_device *dev)
8501{
8502 static u32 regdump_len;
8503 struct bnx2x *bp = netdev_priv(dev);
8504 int i;
8505
8506 if (regdump_len)
8507 return regdump_len;
8508
8509 if (CHIP_IS_E1(bp)) {
8510 for (i = 0; i < REGS_COUNT; i++)
8511 if (IS_E1_ONLINE(reg_addrs[i].info))
8512 regdump_len += reg_addrs[i].size;
8513
8514 for (i = 0; i < WREGS_COUNT_E1; i++)
8515 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8516 regdump_len += wreg_addrs_e1[i].size *
8517 (1 + wreg_addrs_e1[i].read_regs_count);
8518
8519 } else { /* E1H */
8520 for (i = 0; i < REGS_COUNT; i++)
8521 if (IS_E1H_ONLINE(reg_addrs[i].info))
8522 regdump_len += reg_addrs[i].size;
8523
8524 for (i = 0; i < WREGS_COUNT_E1H; i++)
8525 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8526 regdump_len += wreg_addrs_e1h[i].size *
8527 (1 + wreg_addrs_e1h[i].read_regs_count);
8528 }
8529 regdump_len *= 4;
8530 regdump_len += sizeof(struct dump_hdr);
8531
8532 return regdump_len;
8533}
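/* Sizing note for the loops above: a "wide register" entry contributes
 * its own words plus one block of words per dependent read register,
 * hence size * (1 + read_regs_count); the total is then converted to
 * bytes (*4) and the dump header is added. Minimal sketch: */
struct ex_wreg { unsigned int size; unsigned int read_regs_count; };

static unsigned int ex_wreg_words(const struct ex_wreg *w)
{
	return w->size * (1 + w->read_regs_count);
}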
8534
8535static void bnx2x_get_regs(struct net_device *dev,
8536 struct ethtool_regs *regs, void *_p)
8537{
8538 u32 *p = _p, i, j;
8539 struct bnx2x *bp = netdev_priv(dev);
8540 struct dump_hdr dump_hdr = {0};
8541
8542 regs->version = 0;
8543 memset(p, 0, regs->len);
8544
8545 if (!netif_running(bp->dev))
8546 return;
8547
8548 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8549 dump_hdr.dump_sign = dump_sign_all;
8550 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8551 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8552 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8553 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8554 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8555
8556 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8557 p += dump_hdr.hdr_size + 1;
8558
8559 if (CHIP_IS_E1(bp)) {
8560 for (i = 0; i < REGS_COUNT; i++)
8561 if (IS_E1_ONLINE(reg_addrs[i].info))
8562 for (j = 0; j < reg_addrs[i].size; j++)
8563 *p++ = REG_RD(bp,
8564 reg_addrs[i].addr + j*4);
8565
8566 } else { /* E1H */
8567 for (i = 0; i < REGS_COUNT; i++)
8568 if (IS_E1H_ONLINE(reg_addrs[i].info))
8569 for (j = 0; j < reg_addrs[i].size; j++)
8570 *p++ = REG_RD(bp,
8571 reg_addrs[i].addr + j*4);
8572 }
8573}
8574
8575static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8576{
8577 struct bnx2x *bp = netdev_priv(dev);
8578
8579 if (bp->flags & NO_WOL_FLAG) {
8580 wol->supported = 0;
8581 wol->wolopts = 0;
8582 } else {
8583 wol->supported = WAKE_MAGIC;
8584 if (bp->wol)
8585 wol->wolopts = WAKE_MAGIC;
8586 else
8587 wol->wolopts = 0;
8588 }
8589 memset(&wol->sopass, 0, sizeof(wol->sopass));
8590}
8591
8592static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8593{
8594 struct bnx2x *bp = netdev_priv(dev);
8595
8596 if (wol->wolopts & ~WAKE_MAGIC)
8597 return -EINVAL;
8598
8599 if (wol->wolopts & WAKE_MAGIC) {
8600 if (bp->flags & NO_WOL_FLAG)
8601 return -EINVAL;
8602
8603 bp->wol = 1;
34f80b04 8604 } else
a2fbb9ea 8605 bp->wol = 0;
34f80b04 8606
8607 return 0;
8608}
8609
8610static u32 bnx2x_get_msglevel(struct net_device *dev)
8611{
8612 struct bnx2x *bp = netdev_priv(dev);
8613
8614 return bp->msglevel;
8615}
8616
8617static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8618{
8619 struct bnx2x *bp = netdev_priv(dev);
8620
8621 if (capable(CAP_NET_ADMIN))
8622 bp->msglevel = level;
8623}
8624
8625static int bnx2x_nway_reset(struct net_device *dev)
8626{
8627 struct bnx2x *bp = netdev_priv(dev);
8628
8629 if (!bp->port.pmf)
8630 return 0;
a2fbb9ea 8631
34f80b04 8632 if (netif_running(dev)) {
bb2a0f7a 8633 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8634 bnx2x_link_set(bp);
8635 }
8636
8637 return 0;
8638}
8639
8640static u32
8641bnx2x_get_link(struct net_device *dev)
8642{
8643 struct bnx2x *bp = netdev_priv(dev);
8644
8645 return bp->link_vars.link_up;
8646}
8647
8648static int bnx2x_get_eeprom_len(struct net_device *dev)
8649{
8650 struct bnx2x *bp = netdev_priv(dev);
8651
34f80b04 8652 return bp->common.flash_size;
8653}
8654
8655static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8656{
34f80b04 8657 int port = BP_PORT(bp);
8658 int count, i;
8659 u32 val = 0;
8660
8661 /* adjust timeout for emulation/FPGA */
8662 count = NVRAM_TIMEOUT_COUNT;
8663 if (CHIP_REV_IS_SLOW(bp))
8664 count *= 100;
8665
8666 /* request access to nvram interface */
8667 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8668 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8669
8670 for (i = 0; i < count*10; i++) {
8671 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8672 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8673 break;
8674
8675 udelay(5);
8676 }
8677
8678 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8679 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8680 return -EBUSY;
8681 }
8682
8683 return 0;
8684}
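/* Both NVRAM lock helpers share one shape: write a request to the
 * arbitration register, then poll the grant bit under a bounded
 * udelay() loop. A hedged standalone sketch of that pattern; reg_rd()
 * and delay_us() are assumed stand-ins for REG_RD()/udelay(): */
extern unsigned int reg_rd(unsigned int offset);	/* assumed accessor */
extern void delay_us(unsigned int us);			/* assumed delay */

static int ex_poll_bit(unsigned int offset, unsigned int bit,
		       int want_set, unsigned int tries)
{
	while (tries--) {
		unsigned int val = reg_rd(offset);

		if (!!(val & bit) == !!want_set)
			return 0;
		delay_us(5);
	}
	return -1;	/* the callers above map this to -EBUSY */
}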
8685
8686static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8687{
34f80b04 8688 int port = BP_PORT(bp);
8689 int count, i;
8690 u32 val = 0;
8691
8692 /* adjust timeout for emulation/FPGA */
8693 count = NVRAM_TIMEOUT_COUNT;
8694 if (CHIP_REV_IS_SLOW(bp))
8695 count *= 100;
8696
8697 /* relinquish nvram interface */
8698 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8699 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8700
8701 for (i = 0; i < count*10; i++) {
8702 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8703 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8704 break;
8705
8706 udelay(5);
8707 }
8708
8709 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8710 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8711 return -EBUSY;
8712 }
8713
8714 return 0;
8715}
8716
8717static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8718{
8719 u32 val;
8720
8721 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8722
8723 /* enable both bits, even on read */
8724 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8725 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8726 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8727}
8728
8729static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8730{
8731 u32 val;
8732
8733 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8734
8735 /* disable both bits, even after read */
8736 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8737 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8738 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8739}
8740
4781bfad 8741static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8742 u32 cmd_flags)
8743{
f1410647 8744 int count, i, rc;
8745 u32 val;
8746
8747 /* build the command word */
8748 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8749
8750 /* need to clear DONE bit separately */
8751 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8752
8753 /* address of the NVRAM to read from */
8754 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8755 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8756
8757 /* issue a read command */
8758 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8759
8760 /* adjust timeout for emulation/FPGA */
8761 count = NVRAM_TIMEOUT_COUNT;
8762 if (CHIP_REV_IS_SLOW(bp))
8763 count *= 100;
8764
8765 /* wait for completion */
8766 *ret_val = 0;
8767 rc = -EBUSY;
8768 for (i = 0; i < count; i++) {
8769 udelay(5);
8770 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8771
8772 if (val & MCPR_NVM_COMMAND_DONE) {
8773 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
 8774 /* we read nvram data in cpu order,
 8775 * but ethtool sees it as an array of bytes;
 8776 * converting to big-endian does the work */
4781bfad 8777 *ret_val = cpu_to_be32(val);
8778 rc = 0;
8779 break;
8780 }
8781 }
8782
8783 return rc;
8784}
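/* The cpu_to_be32() above is what keeps `ethtool -e` output identical
 * across hosts: each dword leaves the driver in flash byte order, not
 * CPU order. A userspace illustration (htonl() performs the same
 * conversion as cpu_to_be32() here): */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned int wire = htonl(0x669955aau);	/* the NVRAM magic word */
	unsigned char b[4];

	memcpy(b, &wire, 4);
	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;	/* prints "66 99 55 aa" on any endianness */
}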
8785
8786static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8787 int buf_size)
8788{
8789 int rc;
8790 u32 cmd_flags;
4781bfad 8791 __be32 val;
8792
8793 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8794 DP(BNX2X_MSG_NVM,
c14423fe 8795 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8796 offset, buf_size);
8797 return -EINVAL;
8798 }
8799
8800 if (offset + buf_size > bp->common.flash_size) {
8801 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8802 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8803 offset, buf_size, bp->common.flash_size);
8804 return -EINVAL;
8805 }
8806
8807 /* request access to nvram interface */
8808 rc = bnx2x_acquire_nvram_lock(bp);
8809 if (rc)
8810 return rc;
8811
8812 /* enable access to nvram interface */
8813 bnx2x_enable_nvram_access(bp);
8814
8815 /* read the first word(s) */
8816 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8817 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8818 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8819 memcpy(ret_buf, &val, 4);
8820
8821 /* advance to the next dword */
8822 offset += sizeof(u32);
8823 ret_buf += sizeof(u32);
8824 buf_size -= sizeof(u32);
8825 cmd_flags = 0;
8826 }
8827
8828 if (rc == 0) {
8829 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8830 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8831 memcpy(ret_buf, &val, 4);
8832 }
8833
8834 /* disable access to nvram interface */
8835 bnx2x_disable_nvram_access(bp);
8836 bnx2x_release_nvram_lock(bp);
8837
8838 return rc;
8839}
8840
8841static int bnx2x_get_eeprom(struct net_device *dev,
8842 struct ethtool_eeprom *eeprom, u8 *eebuf)
8843{
8844 struct bnx2x *bp = netdev_priv(dev);
8845 int rc;
8846
8847 if (!netif_running(dev))
8848 return -EAGAIN;
8849
34f80b04 8850 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8851 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8852 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8853 eeprom->len, eeprom->len);
8854
8855 /* parameters already validated in ethtool_get_eeprom */
8856
8857 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8858
8859 return rc;
8860}
8861
8862static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8863 u32 cmd_flags)
8864{
f1410647 8865 int count, i, rc;
8866
8867 /* build the command word */
8868 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8869
8870 /* need to clear DONE bit separately */
8871 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8872
8873 /* write the data */
8874 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8875
8876 /* address of the NVRAM to write to */
8877 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8878 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8879
8880 /* issue the write command */
8881 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8882
8883 /* adjust timeout for emulation/FPGA */
8884 count = NVRAM_TIMEOUT_COUNT;
8885 if (CHIP_REV_IS_SLOW(bp))
8886 count *= 100;
8887
8888 /* wait for completion */
8889 rc = -EBUSY;
8890 for (i = 0; i < count; i++) {
8891 udelay(5);
8892 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8893 if (val & MCPR_NVM_COMMAND_DONE) {
8894 rc = 0;
8895 break;
8896 }
8897 }
8898
8899 return rc;
8900}
8901
f1410647 8902#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8903
8904static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8905 int buf_size)
8906{
8907 int rc;
8908 u32 cmd_flags;
8909 u32 align_offset;
4781bfad 8910 __be32 val;
a2fbb9ea 8911
8912 if (offset + buf_size > bp->common.flash_size) {
8913 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8914 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8915 offset, buf_size, bp->common.flash_size);
8916 return -EINVAL;
8917 }
8918
8919 /* request access to nvram interface */
8920 rc = bnx2x_acquire_nvram_lock(bp);
8921 if (rc)
8922 return rc;
8923
8924 /* enable access to nvram interface */
8925 bnx2x_enable_nvram_access(bp);
8926
8927 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8928 align_offset = (offset & ~0x03);
8929 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8930
8931 if (rc == 0) {
8932 val &= ~(0xff << BYTE_OFFSET(offset));
8933 val |= (*data_buf << BYTE_OFFSET(offset));
8934
 8935 /* nvram data is returned as an array of bytes
 8936 * (big-endian); convert it back to cpu order */
8937 val = be32_to_cpu(val);
8938
8939 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8940 cmd_flags);
8941 }
8942
8943 /* disable access to nvram interface */
8944 bnx2x_disable_nvram_access(bp);
8945 bnx2x_release_nvram_lock(bp);
8946
8947 return rc;
8948}
8949
8950static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8951 int buf_size)
8952{
8953 int rc;
8954 u32 cmd_flags;
8955 u32 val;
8956 u32 written_so_far;
8957
34f80b04 8958 if (buf_size == 1) /* ethtool */
a2fbb9ea 8959 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8960
8961 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8962 DP(BNX2X_MSG_NVM,
c14423fe 8963 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8964 offset, buf_size);
8965 return -EINVAL;
8966 }
8967
8968 if (offset + buf_size > bp->common.flash_size) {
8969 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8970 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8971 offset, buf_size, bp->common.flash_size);
8972 return -EINVAL;
8973 }
8974
8975 /* request access to nvram interface */
8976 rc = bnx2x_acquire_nvram_lock(bp);
8977 if (rc)
8978 return rc;
8979
8980 /* enable access to nvram interface */
8981 bnx2x_enable_nvram_access(bp);
8982
8983 written_so_far = 0;
8984 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8985 while ((written_so_far < buf_size) && (rc == 0)) {
8986 if (written_so_far == (buf_size - sizeof(u32)))
8987 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8988 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8989 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8990 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8991 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8992
8993 memcpy(&val, data_buf, 4);
8994
8995 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8996
8997 /* advance to the next dword */
8998 offset += sizeof(u32);
8999 data_buf += sizeof(u32);
9000 written_so_far += sizeof(u32);
9001 cmd_flags = 0;
9002 }
9003
9004 /* disable access to nvram interface */
9005 bnx2x_disable_nvram_access(bp);
9006 bnx2x_release_nvram_lock(bp);
9007
9008 return rc;
9009}
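/* Sketch of the FIRST/LAST sequencing in the write loop above: LAST is
 * raised on the final dword of the buffer or the last dword before an
 * NVRAM page boundary, FIRST on the first dword of a new page. The
 * page size below is an illustrative stand-in for NVRAM_PAGE_SIZE, and
 * buf_size is assumed dword-aligned as the caller validates: */
#include <stdio.h>

#define EX_PAGE_SIZE 256u
#define EX_FIRST 1u
#define EX_LAST  2u

static void ex_show_flags(unsigned int offset, unsigned int buf_size)
{
	unsigned int done = 0, flags = EX_FIRST;

	while (done < buf_size) {
		if (done == buf_size - 4 ||
		    ((offset + 4) % EX_PAGE_SIZE) == 0)
			flags |= EX_LAST;
		else if ((offset % EX_PAGE_SIZE) == 0)
			flags |= EX_FIRST;

		printf("dword at 0x%x: flags 0x%x\n", offset, flags);
		offset += 4;
		done += 4;
		flags = 0;
	}
}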
9010
9011static int bnx2x_set_eeprom(struct net_device *dev,
9012 struct ethtool_eeprom *eeprom, u8 *eebuf)
9013{
9014 struct bnx2x *bp = netdev_priv(dev);
9015 int rc;
9016
9017 if (!netif_running(dev))
9018 return -EAGAIN;
9019
34f80b04 9020 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9021 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9022 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9023 eeprom->len, eeprom->len);
9024
9025 /* parameters already validated in ethtool_set_eeprom */
9026
c18487ee 9027 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9028 if (eeprom->magic == 0x00504859)
9029 if (bp->port.pmf) {
9030
4a37fb66 9031 bnx2x_acquire_phy_lock(bp);
9032 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9033 bp->link_params.ext_phy_config,
9034 (bp->state != BNX2X_STATE_CLOSED),
9035 eebuf, eeprom->len);
9036 if ((bp->state == BNX2X_STATE_OPEN) ||
9037 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 9038 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 9039 &bp->link_vars, 1);
9040 rc |= bnx2x_phy_init(&bp->link_params,
9041 &bp->link_vars);
bb2a0f7a 9042 }
4a37fb66 9043 bnx2x_release_phy_lock(bp);
9044
9045 } else /* Only the PMF can access the PHY */
9046 return -EINVAL;
9047 else
c18487ee 9048 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9049
9050 return rc;
9051}
9052
9053static int bnx2x_get_coalesce(struct net_device *dev,
9054 struct ethtool_coalesce *coal)
9055{
9056 struct bnx2x *bp = netdev_priv(dev);
9057
9058 memset(coal, 0, sizeof(struct ethtool_coalesce));
9059
9060 coal->rx_coalesce_usecs = bp->rx_ticks;
9061 coal->tx_coalesce_usecs = bp->tx_ticks;
9062
9063 return 0;
9064}
9065
9066static int bnx2x_set_coalesce(struct net_device *dev,
9067 struct ethtool_coalesce *coal)
9068{
9069 struct bnx2x *bp = netdev_priv(dev);
9070
9071 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9072 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9073 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9074
9075 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9076 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9077 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
a2fbb9ea 9078
34f80b04 9079 if (netif_running(dev))
9080 bnx2x_update_coalesce(bp);
9081
9082 return 0;
9083}
9084
9085static void bnx2x_get_ringparam(struct net_device *dev,
9086 struct ethtool_ringparam *ering)
9087{
9088 struct bnx2x *bp = netdev_priv(dev);
9089
9090 ering->rx_max_pending = MAX_RX_AVAIL;
9091 ering->rx_mini_max_pending = 0;
9092 ering->rx_jumbo_max_pending = 0;
9093
9094 ering->rx_pending = bp->rx_ring_size;
9095 ering->rx_mini_pending = 0;
9096 ering->rx_jumbo_pending = 0;
9097
9098 ering->tx_max_pending = MAX_TX_AVAIL;
9099 ering->tx_pending = bp->tx_ring_size;
9100}
9101
9102static int bnx2x_set_ringparam(struct net_device *dev,
9103 struct ethtool_ringparam *ering)
9104{
9105 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9106 int rc = 0;
9107
9108 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9109 (ering->tx_pending > MAX_TX_AVAIL) ||
9110 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9111 return -EINVAL;
9112
9113 bp->rx_ring_size = ering->rx_pending;
9114 bp->tx_ring_size = ering->tx_pending;
9115
9116 if (netif_running(dev)) {
9117 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9118 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9119 }
9120
34f80b04 9121 return rc;
9122}
9123
9124static void bnx2x_get_pauseparam(struct net_device *dev,
9125 struct ethtool_pauseparam *epause)
9126{
9127 struct bnx2x *bp = netdev_priv(dev);
9128
9129 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9130 BNX2X_FLOW_CTRL_AUTO) &&
9131 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9132
9133 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9134 BNX2X_FLOW_CTRL_RX);
9135 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9136 BNX2X_FLOW_CTRL_TX);
9137
9138 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9139 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9140 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9141}
9142
9143static int bnx2x_set_pauseparam(struct net_device *dev,
9144 struct ethtool_pauseparam *epause)
9145{
9146 struct bnx2x *bp = netdev_priv(dev);
9147
9148 if (IS_E1HMF(bp))
9149 return 0;
9150
9151 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9152 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9153 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9154
c0700f90 9155 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9156
f1410647 9157 if (epause->rx_pause)
c0700f90 9158 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9159
f1410647 9160 if (epause->tx_pause)
c0700f90 9161 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9162
9163 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9164 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9165
c18487ee 9166 if (epause->autoneg) {
34f80b04 9167 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9168 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9169 return -EINVAL;
9170 }
a2fbb9ea 9171
c18487ee 9172 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9173 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9174 }
a2fbb9ea 9175
9176 DP(NETIF_MSG_LINK,
9177 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9178
9179 if (netif_running(dev)) {
bb2a0f7a 9180 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9181 bnx2x_link_set(bp);
9182 }
9183
9184 return 0;
9185}
9186
9187static int bnx2x_set_flags(struct net_device *dev, u32 data)
9188{
9189 struct bnx2x *bp = netdev_priv(dev);
9190 int changed = 0;
9191 int rc = 0;
9192
9193 /* TPA requires Rx CSUM offloading */
9194 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9195 if (!(dev->features & NETIF_F_LRO)) {
9196 dev->features |= NETIF_F_LRO;
9197 bp->flags |= TPA_ENABLE_FLAG;
9198 changed = 1;
9199 }
9200
9201 } else if (dev->features & NETIF_F_LRO) {
9202 dev->features &= ~NETIF_F_LRO;
9203 bp->flags &= ~TPA_ENABLE_FLAG;
9204 changed = 1;
9205 }
9206
9207 if (changed && netif_running(dev)) {
9208 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9209 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9210 }
9211
9212 return rc;
9213}
9214
9215static u32 bnx2x_get_rx_csum(struct net_device *dev)
9216{
9217 struct bnx2x *bp = netdev_priv(dev);
9218
9219 return bp->rx_csum;
9220}
9221
9222static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9223{
9224 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9225 int rc = 0;
9226
9227 bp->rx_csum = data;
9228
9229 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9230 TPA'ed packets will be discarded due to wrong TCP CSUM */
9231 if (!data) {
9232 u32 flags = ethtool_op_get_flags(dev);
9233
9234 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9235 }
9236
9237 return rc;
9238}
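/* The coupling above in one line: TPA (LRO) aggregates segments and
 * relies on hardware-validated TCP checksums, so turning Rx CSUM off
 * must also turn LRO off. A trivial sketch of the rule: */
static int ex_lro_allowed(int rx_csum, int want_lro)
{
	return want_lro && rx_csum;	/* clearing rx_csum forces LRO off */
}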
9239
9240static int bnx2x_set_tso(struct net_device *dev, u32 data)
9241{
755735eb 9242 if (data) {
a2fbb9ea 9243 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9244 dev->features |= NETIF_F_TSO6;
9245 } else {
a2fbb9ea 9246 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9247 dev->features &= ~NETIF_F_TSO6;
9248 }
9249
9250 return 0;
9251}
9252
f3c87cdd 9253static const struct {
9254 char string[ETH_GSTRING_LEN];
9255} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9256 { "register_test (offline)" },
9257 { "memory_test (offline)" },
9258 { "loopback_test (offline)" },
9259 { "nvram_test (online)" },
9260 { "interrupt_test (online)" },
9261 { "link_test (online)" },
d3d4f495 9262 { "idle check (online)" }
9263};
9264
9265static int bnx2x_self_test_count(struct net_device *dev)
9266{
9267 return BNX2X_NUM_TESTS;
9268}
9269
9270static int bnx2x_test_registers(struct bnx2x *bp)
9271{
9272 int idx, i, rc = -ENODEV;
9273 u32 wr_val = 0;
9dabc424 9274 int port = BP_PORT(bp);
9275 static const struct {
9276 u32 offset0;
9277 u32 offset1;
9278 u32 mask;
9279 } reg_tbl[] = {
9280/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9281 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9282 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9283 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9284 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9285 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9286 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9287 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9288 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9289 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9290/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9291 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9292 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9293 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9294 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9295 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9296 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9297 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9298 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9299 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9300/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9301 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9302 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9303 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9304 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9305 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9306 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9307 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9308 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9309 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9310/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9311 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9312 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9313 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9314 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9315 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9316 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9317 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9318
9319 { 0xffffffff, 0, 0x00000000 }
9320 };
9321
9322 if (!netif_running(bp->dev))
9323 return rc;
9324
 9325 /* Run the test twice:
 9326 first writing 0x00000000, then writing 0xffffffff */
9327 for (idx = 0; idx < 2; idx++) {
9328
9329 switch (idx) {
9330 case 0:
9331 wr_val = 0;
9332 break;
9333 case 1:
9334 wr_val = 0xffffffff;
9335 break;
9336 }
9337
9338 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9339 u32 offset, mask, save_val, val;
9340
9341 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9342 mask = reg_tbl[i].mask;
9343
9344 save_val = REG_RD(bp, offset);
9345
9346 REG_WR(bp, offset, wr_val);
9347 val = REG_RD(bp, offset);
9348
9349 /* Restore the original register's value */
9350 REG_WR(bp, offset, save_val);
9351
 9352 /* verify that the read-back value matches the written one */
9353 if ((val & mask) != (wr_val & mask))
9354 goto test_reg_exit;
9355 }
9356 }
9357
9358 rc = 0;
9359
9360test_reg_exit:
9361 return rc;
9362}
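/* One table entry of the register test above, as a standalone helper:
 * save the register, write a probe value, read back through the mask,
 * then restore unconditionally. ex_reg_rd()/ex_reg_wr() are assumed
 * stand-ins for REG_RD()/REG_WR(): */
extern unsigned int ex_reg_rd(unsigned int off);
extern void ex_reg_wr(unsigned int off, unsigned int val);

static int ex_probe_reg(unsigned int off, unsigned int mask,
			unsigned int wr_val)
{
	unsigned int save = ex_reg_rd(off);
	unsigned int val;

	ex_reg_wr(off, wr_val);
	val = ex_reg_rd(off);
	ex_reg_wr(off, save);		/* always restore */

	return ((val & mask) == (wr_val & mask)) ? 0 : -1;
}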
9363
9364static int bnx2x_test_memory(struct bnx2x *bp)
9365{
9366 int i, j, rc = -ENODEV;
9367 u32 val;
9368 static const struct {
9369 u32 offset;
9370 int size;
9371 } mem_tbl[] = {
9372 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9373 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9374 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9375 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9376 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9377 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9378 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9379
9380 { 0xffffffff, 0 }
9381 };
9382 static const struct {
9383 char *name;
9384 u32 offset;
9385 u32 e1_mask;
9386 u32 e1h_mask;
f3c87cdd 9387 } prty_tbl[] = {
9388 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9389 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9390 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9391 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9392 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9393 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9394
9395 { NULL, 0xffffffff, 0, 0 }
9396 };
9397
9398 if (!netif_running(bp->dev))
9399 return rc;
9400
9401 /* Go through all the memories */
9402 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9403 for (j = 0; j < mem_tbl[i].size; j++)
9404 REG_RD(bp, mem_tbl[i].offset + j*4);
9405
9406 /* Check the parity status */
9407 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9408 val = REG_RD(bp, prty_tbl[i].offset);
9409 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9410 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9411 DP(NETIF_MSG_HW,
9412 "%s is 0x%x\n", prty_tbl[i].name, val);
9413 goto test_mem_exit;
9414 }
9415 }
9416
9417 rc = 0;
9418
9419test_mem_exit:
9420 return rc;
9421}
9422
9423static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9424{
9425 int cnt = 1000;
9426
9427 if (link_up)
9428 while (bnx2x_link_test(bp) && cnt--)
9429 msleep(10);
9430}
9431
9432static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9433{
9434 unsigned int pkt_size, num_pkts, i;
9435 struct sk_buff *skb;
9436 unsigned char *packet;
9437 struct bnx2x_fastpath *fp = &bp->fp[0];
9438 u16 tx_start_idx, tx_idx;
9439 u16 rx_start_idx, rx_idx;
9440 u16 pkt_prod;
9441 struct sw_tx_bd *tx_buf;
9442 struct eth_tx_bd *tx_bd;
9443 dma_addr_t mapping;
9444 union eth_rx_cqe *cqe;
9445 u8 cqe_fp_flags;
9446 struct sw_rx_bd *rx_buf;
9447 u16 len;
9448 int rc = -ENODEV;
9449
9450 /* check the loopback mode */
9451 switch (loopback_mode) {
9452 case BNX2X_PHY_LOOPBACK:
9453 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9454 return -EINVAL;
9455 break;
9456 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9457 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9458 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9459 break;
9460 default:
f3c87cdd 9461 return -EINVAL;
b5bf9068 9462 }
f3c87cdd 9463
9464 /* prepare the loopback packet */
9465 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9466 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9467 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9468 if (!skb) {
9469 rc = -ENOMEM;
9470 goto test_loopback_exit;
9471 }
9472 packet = skb_put(skb, pkt_size);
9473 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9474 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9475 for (i = ETH_HLEN; i < pkt_size; i++)
9476 packet[i] = (unsigned char) (i & 0xff);
9477
b5bf9068 9478 /* send the loopback packet */
9479 num_pkts = 0;
9480 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9481 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9482
9483 pkt_prod = fp->tx_pkt_prod++;
9484 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9485 tx_buf->first_bd = fp->tx_bd_prod;
9486 tx_buf->skb = skb;
9487
9488 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9489 mapping = pci_map_single(bp->pdev, skb->data,
9490 skb_headlen(skb), PCI_DMA_TODEVICE);
9491 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9492 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9493 tx_bd->nbd = cpu_to_le16(1);
9494 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9495 tx_bd->vlan = cpu_to_le16(pkt_prod);
9496 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9497 ETH_TX_BD_FLAGS_END_BD);
9498 tx_bd->general_data = ((UNICAST_ADDRESS <<
9499 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9500
9501 wmb();
9502
4781bfad 9503 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
f3c87cdd 9504 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 9505 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 9506 DOORBELL(bp, fp->index, 0);
9507
9508 mmiowb();
9509
9510 num_pkts++;
9511 fp->tx_bd_prod++;
9512 bp->dev->trans_start = jiffies;
9513
9514 udelay(100);
9515
9516 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9517 if (tx_idx != tx_start_idx + num_pkts)
9518 goto test_loopback_exit;
9519
9520 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9521 if (rx_idx != rx_start_idx + num_pkts)
9522 goto test_loopback_exit;
9523
9524 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9525 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9526 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9527 goto test_loopback_rx_exit;
9528
9529 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9530 if (len != pkt_size)
9531 goto test_loopback_rx_exit;
9532
9533 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9534 skb = rx_buf->skb;
9535 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9536 for (i = ETH_HLEN; i < pkt_size; i++)
9537 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9538 goto test_loopback_rx_exit;
9539
9540 rc = 0;
9541
9542test_loopback_rx_exit:
9543
9544 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9545 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9546 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9547 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9548
9549 /* Update producers */
9550 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9551 fp->rx_sge_prod);
9552
9553test_loopback_exit:
9554 bp->link_params.loopback_mode = LOOPBACK_NONE;
9555
9556 return rc;
9557}
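/* The loopback payload built above is self-describing: byte i of the
 * frame body holds (i & 0xff), so the receive side can validate it
 * without keeping a copy of the transmitted frame. Sketch, with a
 * stand-in for ETH_HLEN: */
#include <stddef.h>

#define EX_HDR_LEN 14

static int ex_check_loopback_payload(const unsigned char *pkt, size_t len)
{
	size_t i;

	for (i = EX_HDR_LEN; i < len; i++)
		if (pkt[i] != (unsigned char)(i & 0xff))
			return -1;	/* payload corrupted in flight */
	return 0;
}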
9558
9559static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9560{
b5bf9068 9561 int rc = 0, res;
9562
9563 if (!netif_running(bp->dev))
9564 return BNX2X_LOOPBACK_FAILED;
9565
f8ef6e44 9566 bnx2x_netif_stop(bp, 1);
3910c8ae 9567 bnx2x_acquire_phy_lock(bp);
f3c87cdd 9568
9569 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9570 if (res) {
9571 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9572 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9573 }
9574
9575 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9576 if (res) {
9577 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9578 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9579 }
9580
3910c8ae 9581 bnx2x_release_phy_lock(bp);
9582 bnx2x_netif_start(bp);
9583
9584 return rc;
9585}
9586
9587#define CRC32_RESIDUAL 0xdebb20e3
9588
9589static int bnx2x_test_nvram(struct bnx2x *bp)
9590{
9591 static const struct {
9592 int offset;
9593 int size;
9594 } nvram_tbl[] = {
9595 { 0, 0x14 }, /* bootstrap */
9596 { 0x14, 0xec }, /* dir */
9597 { 0x100, 0x350 }, /* manuf_info */
9598 { 0x450, 0xf0 }, /* feature_info */
9599 { 0x640, 0x64 }, /* upgrade_key_info */
9600 { 0x6a4, 0x64 },
9601 { 0x708, 0x70 }, /* manuf_key_info */
9602 { 0x778, 0x70 },
9603 { 0, 0 }
9604 };
4781bfad 9605 __be32 buf[0x350 / 4];
9606 u8 *data = (u8 *)buf;
9607 int i, rc;
9608 u32 magic, csum;
9609
9610 rc = bnx2x_nvram_read(bp, 0, data, 4);
9611 if (rc) {
f5372251 9612 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9613 goto test_nvram_exit;
9614 }
9615
9616 magic = be32_to_cpu(buf[0]);
9617 if (magic != 0x669955aa) {
9618 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9619 rc = -ENODEV;
9620 goto test_nvram_exit;
9621 }
9622
9623 for (i = 0; nvram_tbl[i].size; i++) {
9624
9625 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9626 nvram_tbl[i].size);
9627 if (rc) {
9628 DP(NETIF_MSG_PROBE,
f5372251 9629 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9630 goto test_nvram_exit;
9631 }
9632
9633 csum = ether_crc_le(nvram_tbl[i].size, data);
9634 if (csum != CRC32_RESIDUAL) {
9635 DP(NETIF_MSG_PROBE,
9636 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9637 rc = -ENODEV;
9638 goto test_nvram_exit;
9639 }
9640 }
9641
9642test_nvram_exit:
9643 return rc;
9644}
9645
9646static int bnx2x_test_intr(struct bnx2x *bp)
9647{
9648 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9649 int i, rc;
9650
9651 if (!netif_running(bp->dev))
9652 return -ENODEV;
9653
8d9c5f34 9654 config->hdr.length = 0;
9655 if (CHIP_IS_E1(bp))
9656 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9657 else
9658 config->hdr.offset = BP_FUNC(bp);
0626b899 9659 config->hdr.client_id = bp->fp->cl_id;
9660 config->hdr.reserved1 = 0;
9661
9662 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9663 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9664 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9665 if (rc == 0) {
9666 bp->set_mac_pending++;
9667 for (i = 0; i < 10; i++) {
9668 if (!bp->set_mac_pending)
9669 break;
9670 msleep_interruptible(10);
9671 }
9672 if (i == 10)
9673 rc = -ENODEV;
9674 }
9675
9676 return rc;
9677}
9678
9679static void bnx2x_self_test(struct net_device *dev,
9680 struct ethtool_test *etest, u64 *buf)
9681{
9682 struct bnx2x *bp = netdev_priv(dev);
9683
9684 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9685
f3c87cdd 9686 if (!netif_running(dev))
a2fbb9ea 9687 return;
a2fbb9ea 9688
33471629 9689 /* offline tests are not supported in MF mode */
9690 if (IS_E1HMF(bp))
9691 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9692
9693 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9694 int port = BP_PORT(bp);
9695 u32 val;
f3c87cdd
YG
9696 u8 link_up;
9697
9698 /* save current value of input enable for TX port IF */
9699 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9700 /* disable input for TX port IF */
9701 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9702
9703 link_up = bp->link_vars.link_up;
9704 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9705 bnx2x_nic_load(bp, LOAD_DIAG);
9706 /* wait until link state is restored */
9707 bnx2x_wait_for_link(bp, link_up);
9708
9709 if (bnx2x_test_registers(bp) != 0) {
9710 buf[0] = 1;
9711 etest->flags |= ETH_TEST_FL_FAILED;
9712 }
9713 if (bnx2x_test_memory(bp) != 0) {
9714 buf[1] = 1;
9715 etest->flags |= ETH_TEST_FL_FAILED;
9716 }
9717 buf[2] = bnx2x_test_loopback(bp, link_up);
9718 if (buf[2] != 0)
9719 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9720
f3c87cdd 9721 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9722
9723 /* restore input for TX port IF */
9724 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9725
9726 bnx2x_nic_load(bp, LOAD_NORMAL);
9727 /* wait until link state is restored */
9728 bnx2x_wait_for_link(bp, link_up);
9729 }
9730 if (bnx2x_test_nvram(bp) != 0) {
9731 buf[3] = 1;
9732 etest->flags |= ETH_TEST_FL_FAILED;
9733 }
9734 if (bnx2x_test_intr(bp) != 0) {
9735 buf[4] = 1;
9736 etest->flags |= ETH_TEST_FL_FAILED;
9737 }
9738 if (bp->port.pmf)
9739 if (bnx2x_link_test(bp) != 0) {
9740 buf[5] = 1;
9741 etest->flags |= ETH_TEST_FL_FAILED;
9742 }
9743
9744#ifdef BNX2X_EXTRA_DEBUG
9745 bnx2x_panic_dump(bp);
9746#endif
9747}
9748
9749static const struct {
9750 long offset;
9751 int size;
9752 u8 string[ETH_GSTRING_LEN];
9753} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9754/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9755 { Q_STATS_OFFSET32(error_bytes_received_hi),
9756 8, "[%d]: rx_error_bytes" },
9757 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9758 8, "[%d]: rx_ucast_packets" },
9759 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9760 8, "[%d]: rx_mcast_packets" },
9761 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9762 8, "[%d]: rx_bcast_packets" },
9763 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9764 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9765 4, "[%d]: rx_phy_ip_err_discards"},
9766 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9767 4, "[%d]: rx_skb_alloc_discard" },
9768 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9769
9770/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9771 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9772 8, "[%d]: tx_packets" }
9773};
9774
9775static const struct {
9776 long offset;
9777 int size;
9778 u32 flags;
9779#define STATS_FLAGS_PORT 1
9780#define STATS_FLAGS_FUNC 2
de832a55 9781#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9782 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9783} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9784/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9785 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9786 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9787 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9788 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9789 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9790 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9791 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9792 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9793 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9794 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9795 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9796 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9797 8, STATS_FLAGS_PORT, "rx_align_errors" },
9798 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9799 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9800 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9801 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9802/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9803 8, STATS_FLAGS_PORT, "rx_fragments" },
9804 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9805 8, STATS_FLAGS_PORT, "rx_jabbers" },
9806 { STATS_OFFSET32(no_buff_discard_hi),
9807 8, STATS_FLAGS_BOTH, "rx_discards" },
9808 { STATS_OFFSET32(mac_filter_discard),
9809 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9810 { STATS_OFFSET32(xxoverflow_discard),
9811 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9812 { STATS_OFFSET32(brb_drop_hi),
9813 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9814 { STATS_OFFSET32(brb_truncate_hi),
9815 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9816 { STATS_OFFSET32(pause_frames_received_hi),
9817 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9818 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9819 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9820 { STATS_OFFSET32(nig_timer_max),
9821 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9822/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9823 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9824 { STATS_OFFSET32(rx_skb_alloc_failed),
9825 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9826 { STATS_OFFSET32(hw_csum_err),
9827 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9828
9829 { STATS_OFFSET32(total_bytes_transmitted_hi),
9830 8, STATS_FLAGS_BOTH, "tx_bytes" },
9831 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9832 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9833 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9834 8, STATS_FLAGS_BOTH, "tx_packets" },
9835 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9836 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9837 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9838 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9839 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9840 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9841 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9842 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9843/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9844 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9845 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9846 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9847 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9848 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9849 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9850 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9851 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9852 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9853 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9854 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9855 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9856 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9857 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9858 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9859 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9860 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9861 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9862 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9863/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9864 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9865 { STATS_OFFSET32(pause_frames_sent_hi),
9866 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9867};
9868
9869#define IS_PORT_STAT(i) \
9870 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9871#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9872#define IS_E1HMF_MODE_STAT(bp) \
9873 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9874
9875static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9876{
bb2a0f7a 9877 struct bnx2x *bp = netdev_priv(dev);
de832a55 9878 int i, j, k;
bb2a0f7a 9879
9880 switch (stringset) {
9881 case ETH_SS_STATS:
9882 if (is_multi(bp)) {
9883 k = 0;
9884 for_each_queue(bp, i) {
9885 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9886 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9887 bnx2x_q_stats_arr[j].string, i);
9888 k += BNX2X_NUM_Q_STATS;
9889 }
9890 if (IS_E1HMF_MODE_STAT(bp))
9891 break;
9892 for (j = 0; j < BNX2X_NUM_STATS; j++)
9893 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9894 bnx2x_stats_arr[j].string);
9895 } else {
9896 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9897 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9898 continue;
9899 strcpy(buf + j*ETH_GSTRING_LEN,
9900 bnx2x_stats_arr[i].string);
9901 j++;
9902 }
bb2a0f7a 9903 }
9904 break;
9905
9906 case ETH_SS_TEST:
9907 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9908 break;
9909 }
9910}
9911
9912static int bnx2x_get_stats_count(struct net_device *dev)
9913{
bb2a0f7a 9914 struct bnx2x *bp = netdev_priv(dev);
de832a55 9915 int i, num_stats;
bb2a0f7a 9916
9917 if (is_multi(bp)) {
9918 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9919 if (!IS_E1HMF_MODE_STAT(bp))
9920 num_stats += BNX2X_NUM_STATS;
9921 } else {
9922 if (IS_E1HMF_MODE_STAT(bp)) {
9923 num_stats = 0;
9924 for (i = 0; i < BNX2X_NUM_STATS; i++)
9925 if (IS_FUNC_STAT(i))
9926 num_stats++;
9927 } else
9928 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9929 }
de832a55 9930
bb2a0f7a 9931 return num_stats;
9932}
9933
9934static void bnx2x_get_ethtool_stats(struct net_device *dev,
9935 struct ethtool_stats *stats, u64 *buf)
9936{
9937 struct bnx2x *bp = netdev_priv(dev);
9938 u32 *hw_stats, *offset;
9939 int i, j, k;
bb2a0f7a 9940
9941 if (is_multi(bp)) {
9942 k = 0;
9943 for_each_queue(bp, i) {
9944 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9945 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9946 if (bnx2x_q_stats_arr[j].size == 0) {
9947 /* skip this counter */
9948 buf[k + j] = 0;
9949 continue;
9950 }
9951 offset = (hw_stats +
9952 bnx2x_q_stats_arr[j].offset);
9953 if (bnx2x_q_stats_arr[j].size == 4) {
9954 /* 4-byte counter */
9955 buf[k + j] = (u64) *offset;
9956 continue;
9957 }
9958 /* 8-byte counter */
9959 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9960 }
9961 k += BNX2X_NUM_Q_STATS;
9962 }
9963 if (IS_E1HMF_MODE_STAT(bp))
9964 return;
9965 hw_stats = (u32 *)&bp->eth_stats;
9966 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9967 if (bnx2x_stats_arr[j].size == 0) {
9968 /* skip this counter */
9969 buf[k + j] = 0;
9970 continue;
9971 }
9972 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9973 if (bnx2x_stats_arr[j].size == 4) {
9974 /* 4-byte counter */
9975 buf[k + j] = (u64) *offset;
9976 continue;
9977 }
9978 /* 8-byte counter */
9979 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9980 }
9981 } else {
9982 hw_stats = (u32 *)&bp->eth_stats;
9983 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9984 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9985 continue;
9986 if (bnx2x_stats_arr[i].size == 0) {
9987 /* skip this counter */
9988 buf[j] = 0;
9989 j++;
9990 continue;
9991 }
9992 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9993 if (bnx2x_stats_arr[i].size == 4) {
9994 /* 4-byte counter */
9995 buf[j] = (u64) *offset;
9996 j++;
9997 continue;
9998 }
9999 /* 8-byte counter */
10000 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10001 j++;
a2fbb9ea 10002 }
10003 }
10004}
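/* The HILO_U64() uses above stitch one 64-bit counter out of two
 * adjacent 32-bit words, high word first (hence the *_hi naming in the
 * stat tables). Minimal sketch: */
#include <stdint.h>

static uint64_t ex_hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}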
10005
10006static int bnx2x_phys_id(struct net_device *dev, u32 data)
10007{
10008 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10009 int port = BP_PORT(bp);
10010 int i;
10011
10012 if (!netif_running(dev))
10013 return 0;
10014
10015 if (!bp->port.pmf)
10016 return 0;
10017
10018 if (data == 0)
10019 data = 2;
10020
10021 for (i = 0; i < (data * 2); i++) {
c18487ee 10022 if ((i % 2) == 0)
34f80b04 10023 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10024 bp->link_params.hw_led_mode,
10025 bp->link_params.chip_id);
10026 else
34f80b04 10027 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10028 bp->link_params.hw_led_mode,
10029 bp->link_params.chip_id);
10030
10031 msleep_interruptible(500);
10032 if (signal_pending(current))
10033 break;
10034 }
10035
c18487ee 10036 if (bp->link_vars.link_up)
34f80b04 10037 bnx2x_set_led(bp, port, LED_MODE_OPER,
10038 bp->link_vars.line_speed,
10039 bp->link_params.hw_led_mode,
10040 bp->link_params.chip_id);
10041
10042 return 0;
10043}
10044
10045static struct ethtool_ops bnx2x_ethtool_ops = {
10046 .get_settings = bnx2x_get_settings,
10047 .set_settings = bnx2x_set_settings,
10048 .get_drvinfo = bnx2x_get_drvinfo,
10049 .get_regs_len = bnx2x_get_regs_len,
10050 .get_regs = bnx2x_get_regs,
10051 .get_wol = bnx2x_get_wol,
10052 .set_wol = bnx2x_set_wol,
10053 .get_msglevel = bnx2x_get_msglevel,
10054 .set_msglevel = bnx2x_set_msglevel,
10055 .nway_reset = bnx2x_nway_reset,
01e53298 10056 .get_link = bnx2x_get_link,
10057 .get_eeprom_len = bnx2x_get_eeprom_len,
10058 .get_eeprom = bnx2x_get_eeprom,
10059 .set_eeprom = bnx2x_set_eeprom,
10060 .get_coalesce = bnx2x_get_coalesce,
10061 .set_coalesce = bnx2x_set_coalesce,
10062 .get_ringparam = bnx2x_get_ringparam,
10063 .set_ringparam = bnx2x_set_ringparam,
10064 .get_pauseparam = bnx2x_get_pauseparam,
10065 .set_pauseparam = bnx2x_set_pauseparam,
10066 .get_rx_csum = bnx2x_get_rx_csum,
10067 .set_rx_csum = bnx2x_set_rx_csum,
10068 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10069 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10070 .set_flags = bnx2x_set_flags,
10071 .get_flags = ethtool_op_get_flags,
10072 .get_sg = ethtool_op_get_sg,
10073 .set_sg = ethtool_op_set_sg,
10074 .get_tso = ethtool_op_get_tso,
10075 .set_tso = bnx2x_set_tso,
10076 .self_test_count = bnx2x_self_test_count,
10077 .self_test = bnx2x_self_test,
10078 .get_strings = bnx2x_get_strings,
10079 .phys_id = bnx2x_phys_id,
10080 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10081 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10082};
10083
10084/* end of ethtool_ops */
10085
10086/****************************************************************************
10087* General service functions
10088****************************************************************************/
10089
10090static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10091{
10092 u16 pmcsr;
10093
10094 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10095
10096 switch (state) {
10097 case PCI_D0:
34f80b04 10098 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10099 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10100 PCI_PM_CTRL_PME_STATUS));
10101
10102 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10103 /* delay required during transition out of D3hot */
a2fbb9ea 10104 msleep(20);
34f80b04 10105 break;
a2fbb9ea 10106
10107 case PCI_D3hot:
10108 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10109 pmcsr |= 3;
a2fbb9ea 10110
10111 if (bp->wol)
10112 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10113
10114 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10115 pmcsr);
a2fbb9ea 10116
10117 /* No more memory access after this point until
10118 * device is brought back to D0.
10119 */
10120 break;
10121
10122 default:
10123 return -EINVAL;
10124 }
10125 return 0;
10126}
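/* Sketch of the PMCSR handling above: the low two bits of the PCI PM
 * control/status word select the power state (0 = D0, 3 = D3hot), and
 * PME is enabled for D3hot only when WoL is armed. The masks below are
 * the standard PCI PM register bits: */
#include <stdint.h>

#define EX_PM_CTRL_STATE_MASK 0x0003u	/* PCI_PM_CTRL_STATE_MASK */
#define EX_PM_CTRL_PME_ENABLE 0x0100u	/* PCI_PM_CTRL_PME_ENABLE */

static uint16_t ex_pmcsr_for_d3hot(uint16_t pmcsr, int wol)
{
	pmcsr &= ~EX_PM_CTRL_STATE_MASK;
	pmcsr |= 3;			/* D3hot */
	if (wol)
		pmcsr |= EX_PM_CTRL_PME_ENABLE;
	return pmcsr;
}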
10127
10128static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10129{
10130 u16 rx_cons_sb;
10131
10132 /* Tell compiler that status block fields can change */
10133 barrier();
10134 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10135 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10136 rx_cons_sb++;
10137 return (fp->rx_comp_cons != rx_cons_sb);
10138}
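/* The "+1" above skips one slot per completion-queue page: when the
 * consumer index from the status block lands on MAX_RCQ_DESC_CNT, the
 * next usable completion sits one entry further (the last entry of a
 * ring page is assumed here to hold the next-page link rather than a
 * completion). Illustrative sketch with a stand-in page size: */
#define EX_RCQ_DESC_CNT     128u
#define EX_MAX_RCQ_DESC_CNT (EX_RCQ_DESC_CNT - 1)

static unsigned short ex_adjust_rx_cons(unsigned short cons_sb)
{
	if ((cons_sb & EX_MAX_RCQ_DESC_CNT) == EX_MAX_RCQ_DESC_CNT)
		cons_sb++;	/* hop over the page-link entry */
	return cons_sb;
}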
10139
10140/*
10141 * net_device service functions
10142 */
10143
10144static int bnx2x_poll(struct napi_struct *napi, int budget)
10145{
10146 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10147 napi);
10148 struct bnx2x *bp = fp->bp;
10149 int work_done = 0;
10150
10151#ifdef BNX2X_STOP_ON_ERROR
10152 if (unlikely(bp->panic))
34f80b04 10153 goto poll_panic;
10154#endif
10155
10156 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10157 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10158 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10159
10160 bnx2x_update_fpsb_idx(fp);
10161
237907c1 10162 if (bnx2x_has_tx_work(fp))
7961f791 10163 bnx2x_tx_int(fp);
a2fbb9ea 10164
8534f32c 10165 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10166 work_done = bnx2x_rx_int(fp, budget);
356e2385 10167
10168 /* must not complete if we consumed full budget */
10169 if (work_done >= budget)
10170 goto poll_again;
10171 }
a2fbb9ea 10172
 10173 /* BNX2X_HAS_WORK() reads the status block, so we must ensure that
 10174 * the status block indices have actually been read
 10175 * (bnx2x_update_fpsb_idx) before this check (BNX2X_HAS_WORK), so that
 10176 * we do not write the "newer" value of the status block to the IGU
 10177 * (if a DMA arrived right after BNX2X_HAS_WORK and, with no rmb, the
 10178 * memory read in bnx2x_update_fpsb_idx were postponed to just before
 10179 * bnx2x_ack_sb). In that case there would never be another interrupt
 10180 * until the next status block update, even though unhandled work
 10181 * remains.
 10182 */
10183 rmb();
a2fbb9ea 10184
8534f32c 10185 if (!BNX2X_HAS_WORK(fp)) {
a2fbb9ea 10186#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10187poll_panic:
a2fbb9ea 10188#endif
288379f0 10189 napi_complete(napi);
a2fbb9ea 10190
0626b899 10191 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10192 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10193 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10194 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10195 }
356e2385 10196
8534f32c 10197poll_again:
10198 return work_done;
10199}
10200
10201
10202/* we split the first BD into a headers BD and a data BD
33471629 10203 * to ease the pain of our fellow microcode engineers;
10204 * we use one mapping for both BDs.
10205 * So far this has only been observed to be needed
10206 * in Other Operating Systems(TM)
10207 */
10208static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10209 struct bnx2x_fastpath *fp,
10210 struct eth_tx_bd **tx_bd, u16 hlen,
10211 u16 bd_prod, int nbd)
10212{
10213 struct eth_tx_bd *h_tx_bd = *tx_bd;
10214 struct eth_tx_bd *d_tx_bd;
10215 dma_addr_t mapping;
10216 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10217
10218 /* first fix first BD */
10219 h_tx_bd->nbd = cpu_to_le16(nbd);
10220 h_tx_bd->nbytes = cpu_to_le16(hlen);
10221
10222 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10223 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10224 h_tx_bd->addr_lo, h_tx_bd->nbd);
10225
10226 /* now get a new data BD
10227 * (after the pbd) and fill it */
10228 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10229 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10230
10231 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10232 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10233
10234 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10235 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10236 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10237 d_tx_bd->vlan = 0;
10238 /* this marks the BD as one that has no individual mapping
10239 * the FW ignores this flag in a BD not marked start
10240 */
10241 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10242 DP(NETIF_MSG_TX_QUEUED,
10243 "TSO split data size is %d (%x:%x)\n",
10244 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10245
10246 /* update tx_bd for marking the last BD flag */
10247 *tx_bd = d_tx_bd;
10248
10249 return bd_prod;
10250}
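/*
 * Editor's note: the split above reuses a single DMA mapping: the header
 * BD keeps bytes [0, hlen) and the new data BD points at (mapping + hlen)
 * for the remaining (old_len - hlen) bytes. A standalone model of just
 * that arithmetic, with an illustrative BD type rather than eth_tx_bd:
 */
#include <assert.h>
#include <stdint.h>

struct toy_bd {
	uint64_t addr;
	uint16_t nbytes;
};

static void toy_tx_split(uint64_t mapping, uint16_t old_len, uint16_t hlen,
			 struct toy_bd *h_bd, struct toy_bd *d_bd)
{
	h_bd->addr = mapping;
	h_bd->nbytes = hlen;		/* headers only */
	d_bd->addr = mapping + hlen;	/* same mapping, past the headers */
	d_bd->nbytes = old_len - hlen;	/* the rest of the first buffer */
}

int main(void)
{
	struct toy_bd h, d;

	toy_tx_split(0x1000, 1400, 66, &h, &d);
	assert(h.nbytes == 66 && d.addr == 0x1000 + 66 && d.nbytes == 1334);
	return 0;
}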
10251
10252static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10253{
10254 if (fix > 0)
10255 csum = (u16) ~csum_fold(csum_sub(csum,
10256 csum_partial(t_header - fix, fix, 0)));
10257
10258 else if (fix < 0)
10259 csum = (u16) ~csum_fold(csum_add(csum,
10260 csum_partial(t_header, -fix, 0)));
10261
10262 return swab16(csum);
10263}
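/*
 * Editor's note: a standalone illustration of the fix-up above, using a
 * plain 16-bit one's-complement sum in place of the kernel csum_* helpers.
 * If the stack's partial checksum started `fix` bytes before the transport
 * header, summing those bytes and subtracting them (one's-complement
 * subtract == add the complement) recovers the checksum over the transport
 * part alone. Sample bytes are made up.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ocsum(const uint8_t *p, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)((p[i] << 8) | p[i + 1]);
	if (len & 1)
		sum += (uint32_t)(p[len - 1] << 8);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t buf[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	size_t fix = 2;	/* csum started 2 bytes before the transport header */
	uint32_t fixed = ocsum(buf, sizeof(buf), 0) +
			 (~ocsum(buf, fix, 0) & 0xffff);

	while (fixed >> 16)
		fixed = (fixed & 0xffff) + (fixed >> 16);
	/* both print 88aa */
	printf("direct %04x fixed %04x\n",
	       ocsum(buf + fix, sizeof(buf) - fix, 0), fixed);
	return 0;
}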
10264
10265static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10266{
10267 u32 rc;
10268
10269 if (skb->ip_summed != CHECKSUM_PARTIAL)
10270 rc = XMIT_PLAIN;
10271
10272 else {
4781bfad 10273 if (skb->protocol == htons(ETH_P_IPV6)) {
10274 rc = XMIT_CSUM_V6;
10275 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10276 rc |= XMIT_CSUM_TCP;
10277
10278 } else {
10279 rc = XMIT_CSUM_V4;
10280 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10281 rc |= XMIT_CSUM_TCP;
10282 }
10283 }
10284
10285 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10286 rc |= XMIT_GSO_V4;
10287
10288 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10289 rc |= XMIT_GSO_V6;
10290
10291 return rc;
10292}
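/*
 * Editor's note: xmit_type is a bit mask, so the transmit path below can
 * test the offload classes independently. A toy demonstration with
 * stand-in flag values (the real XMIT_* constants live in bnx2x.h):
 */
#include <stdio.h>

enum { TOY_CSUM_V4 = 1, TOY_CSUM_V6 = 2, TOY_CSUM_TCP = 4, TOY_GSO_V4 = 8 };

int main(void)
{
	unsigned int t = TOY_CSUM_V4 | TOY_CSUM_TCP | TOY_GSO_V4;

	if (t & (TOY_CSUM_V4 | TOY_CSUM_V6))
		printf("build a parsing BD for checksum offload\n");
	if (t & TOY_GSO_V4)
		printf("take the TSO path\n");
	return 0;
}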
10293
632da4d6 10294#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10295/* check whether a packet requires linearization (it is too fragmented);
10296 no need to check fragmentation if page size > 8K, since the FW
10297 restrictions cannot be violated in that case */
10298static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10299 u32 xmit_type)
10300{
10301 int to_copy = 0;
10302 int hlen = 0;
10303 int first_bd_sz = 0;
10304
10305 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10306 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10307
10308 if (xmit_type & XMIT_GSO) {
10309 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10310 /* Check if LSO packet needs to be copied:
10311 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10312 int wnd_size = MAX_FETCH_BD - 3;
33471629 10313 /* Number of windows to check */
10314 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10315 int wnd_idx = 0;
10316 int frag_idx = 0;
10317 u32 wnd_sum = 0;
10318
10319 /* Headers length */
10320 hlen = (int)(skb_transport_header(skb) - skb->data) +
10321 tcp_hdrlen(skb);
10322
10323 /* Amount of data (w/o headers) on linear part of SKB*/
10324 first_bd_sz = skb_headlen(skb) - hlen;
10325
10326 wnd_sum = first_bd_sz;
10327
10328 /* Calculate the first sum - it's special */
10329 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10330 wnd_sum +=
10331 skb_shinfo(skb)->frags[frag_idx].size;
10332
10333 /* If there was data on linear skb data - check it */
10334 if (first_bd_sz > 0) {
10335 if (unlikely(wnd_sum < lso_mss)) {
10336 to_copy = 1;
10337 goto exit_lbl;
10338 }
10339
10340 wnd_sum -= first_bd_sz;
10341 }
10342
10343 /* Others are easier: run through the frag list and
10344 check all windows */
10345 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10346 wnd_sum +=
10347 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10348
10349 if (unlikely(wnd_sum < lso_mss)) {
10350 to_copy = 1;
10351 break;
10352 }
10353 wnd_sum -=
10354 skb_shinfo(skb)->frags[wnd_idx].size;
10355 }
10356 } else {
 10357 /* a non-LSO packet that is too fragmented
 10358 must always be linearized */
10359 to_copy = 1;
10360 }
10361 }
10362
10363exit_lbl:
10364 if (unlikely(to_copy))
10365 DP(NETIF_MSG_TX_QUEUED,
10366 "Linearization IS REQUIRED for %s packet. "
10367 "num_frags %d hlen %d first_bd_sz %d\n",
10368 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10369 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10370
10371 return to_copy;
10372}
632da4d6 10373#endif
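/*
 * Editor's note: a standalone model of the window rule enforced above (as
 * the code implies, any lso_mss worth of payload must fit in at most
 * wnd_size BDs). The driver keeps a rolling sum; this sketch recomputes
 * each window for clarity, and the sizes are made up.
 */
#include <stdio.h>

static int toy_needs_linearize(const int *frag, int nfrags,
			       int wnd_size, int lso_mss)
{
	int i, j, wnd_sum;

	for (i = 0; i + wnd_size <= nfrags; i++) {
		for (j = 0, wnd_sum = 0; j < wnd_size; j++)
			wnd_sum += frag[i + j];
		if (wnd_sum < lso_mss)
			return 1;	/* some window is too fragmented */
	}
	return 0;
}

int main(void)
{
	int frags[8] = { 200, 100, 100, 150, 120, 90, 80, 110 };

	/* 1: every 3-BD window carries less than one 1460-byte MSS */
	printf("needs linearize: %d\n",
	       toy_needs_linearize(frags, 8, 3, 1460));
	return 0;
}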
10374
10375/* called with netif_tx_lock
a2fbb9ea 10376 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10377 * netif_wake_queue()
10378 */
10379static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10380{
10381 struct bnx2x *bp = netdev_priv(dev);
10382 struct bnx2x_fastpath *fp;
555f6c78 10383 struct netdev_queue *txq;
10384 struct sw_tx_bd *tx_buf;
10385 struct eth_tx_bd *tx_bd;
10386 struct eth_tx_parse_bd *pbd = NULL;
10387 u16 pkt_prod, bd_prod;
755735eb 10388 int nbd, fp_index;
a2fbb9ea 10389 dma_addr_t mapping;
10390 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10391 int vlan_off = (bp->e1hov ? 4 : 0);
10392 int i;
10393 u8 hlen = 0;
10394
10395#ifdef BNX2X_STOP_ON_ERROR
10396 if (unlikely(bp->panic))
10397 return NETDEV_TX_BUSY;
10398#endif
10399
10400 fp_index = skb_get_queue_mapping(skb);
10401 txq = netdev_get_tx_queue(dev, fp_index);
10402
a2fbb9ea 10403 fp = &bp->fp[fp_index];
755735eb 10404
231fd58a 10405 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10406 fp->eth_q_stats.driver_xoff++;
555f6c78 10407 netif_tx_stop_queue(txq);
10408 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10409 return NETDEV_TX_BUSY;
10410 }
10411
10412 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10413 " gso type %x xmit_type %x\n",
10414 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10415 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10416
632da4d6 10417#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 10418 /* First, check if we need to linearize the skb (due to FW
 10419 restrictions). No need to check fragmentation if page size > 8K,
 10420 since the FW restrictions cannot be violated in that case */
10421 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10422 /* Statistics of linearization */
10423 bp->lin_cnt++;
10424 if (skb_linearize(skb) != 0) {
10425 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10426 "silently dropping this SKB\n");
10427 dev_kfree_skb_any(skb);
da5a662a 10428 return NETDEV_TX_OK;
10429 }
10430 }
632da4d6 10431#endif
755735eb 10432
a2fbb9ea 10433 /*
755735eb 10434 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10435 then for TSO or xsum we have a parsing info BD,
755735eb 10436 and only then we have the rest of the TSO BDs.
10437 (don't forget to mark the last one as last,
10438 and to unmap only AFTER you write to the BD ...)
755735eb 10439 And above all, all PBD sizes are in words - NOT DWORDS!
10440 */
10441
10442 pkt_prod = fp->tx_pkt_prod++;
755735eb 10443 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10444
755735eb 10445 /* get a tx_buf and first BD */
10446 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10447 tx_bd = &fp->tx_desc_ring[bd_prod];
10448
10449 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10450 tx_bd->general_data = (UNICAST_ADDRESS <<
10451 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10452 /* header nbd */
10453 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10454
10455 /* remember the first BD of the packet */
10456 tx_buf->first_bd = fp->tx_bd_prod;
10457 tx_buf->skb = skb;
10458
10459 DP(NETIF_MSG_TX_QUEUED,
10460 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10461 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10462
10463#ifdef BCM_VLAN
10464 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10465 (bp->flags & HW_VLAN_TX_FLAG)) {
10466 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10467 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10468 vlan_off += 4;
10469 } else
0c6671b0 10470#endif
755735eb 10471 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10472
755735eb 10473 if (xmit_type) {
755735eb 10474 /* turn on parsing and get a BD */
10475 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10476 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10477
10478 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10479 }
10480
10481 if (xmit_type & XMIT_CSUM) {
10482 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10483
10484 /* for now NS flag is not used in Linux */
10485 pbd->global_data =
10486 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10487 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10488
10489 pbd->ip_hlen = (skb_transport_header(skb) -
10490 skb_network_header(skb)) / 2;
10491
10492 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10493
10494 pbd->total_hlen = cpu_to_le16(hlen);
10495 hlen = hlen*2 - vlan_off;
a2fbb9ea 10496
10497 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10498
10499 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10500 tx_bd->bd_flags.as_bitfield |=
10501 ETH_TX_BD_FLAGS_IP_CSUM;
10502 else
10503 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10504
10505 if (xmit_type & XMIT_CSUM_TCP) {
10506 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10507
10508 } else {
10509 s8 fix = SKB_CS_OFF(skb); /* signed! */
10510
a2fbb9ea 10511 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10512 pbd->cs_offset = fix / 2;
a2fbb9ea 10513
10514 DP(NETIF_MSG_TX_QUEUED,
10515 "hlen %d offset %d fix %d csum before fix %x\n",
10516 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10517 SKB_CS(skb));
10518
10519 /* HW bug: fixup the CSUM */
10520 pbd->tcp_pseudo_csum =
10521 bnx2x_csum_fix(skb_transport_header(skb),
10522 SKB_CS(skb), fix);
10523
10524 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10525 pbd->tcp_pseudo_csum);
10526 }
10527 }
10528
10529 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10530 skb_headlen(skb), PCI_DMA_TODEVICE);
10531
10532 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10533 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10534 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10535 tx_bd->nbd = cpu_to_le16(nbd);
10536 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10537
10538 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10539 " nbytes %d flags %x vlan %x\n",
10540 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10541 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10542 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10543
755735eb 10544 if (xmit_type & XMIT_GSO) {
10545
10546 DP(NETIF_MSG_TX_QUEUED,
10547 "TSO packet len %d hlen %d total len %d tso size %d\n",
10548 skb->len, hlen, skb_headlen(skb),
10549 skb_shinfo(skb)->gso_size);
10550
10551 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10552
10553 if (unlikely(skb_headlen(skb) > hlen))
10554 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10555 bd_prod, ++nbd);
10556
10557 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10558 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10559 pbd->tcp_flags = pbd_tcp_flags(skb);
10560
10561 if (xmit_type & XMIT_GSO_V4) {
10562 pbd->ip_id = swab16(ip_hdr(skb)->id);
10563 pbd->tcp_pseudo_csum =
10564 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10565 ip_hdr(skb)->daddr,
10566 0, IPPROTO_TCP, 0));
10567
10568 } else
10569 pbd->tcp_pseudo_csum =
10570 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10571 &ipv6_hdr(skb)->daddr,
10572 0, IPPROTO_TCP, 0));
10573
10574 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10575 }
10576
10577 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10578 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10579
10580 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10581 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10582
10583 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10584 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10585
10586 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10587 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10588 tx_bd->nbytes = cpu_to_le16(frag->size);
10589 tx_bd->vlan = cpu_to_le16(pkt_prod);
10590 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10591
10592 DP(NETIF_MSG_TX_QUEUED,
10593 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10594 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10595 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10596 }
10597
755735eb 10598 /* now at last mark the BD as the last BD */
10599 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10600
10601 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10602 tx_bd, tx_bd->bd_flags.as_bitfield);
10603
10604 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10605
755735eb 10606 /* now send a tx doorbell, counting the next BD
10607 * if the packet contains or ends with it
10608 */
10609 if (TX_BD_POFF(bd_prod) < nbd)
10610 nbd++;
10611
10612 if (pbd)
10613 DP(NETIF_MSG_TX_QUEUED,
10614 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10615 " tcp_flags %x xsum %x seq %u hlen %u\n",
10616 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10617 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10618 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10619
755735eb 10620 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10621
10622 /*
10623 * Make sure that the BD data is updated before updating the producer
10624 * since FW might read the BD right after the producer is updated.
10625 * This is only applicable for weak-ordered memory model archs such
 10626 * as IA-64. The following barrier is also mandatory since the FW
 10627 * assumes packets always have BDs.
10628 */
10629 wmb();
10630
4781bfad 10631 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
a2fbb9ea 10632 mb(); /* FW restriction: must not reorder writing nbd and packets */
4781bfad 10633 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
0626b899 10634 DOORBELL(bp, fp->index, 0);
10635
10636 mmiowb();
10637
755735eb 10638 fp->tx_bd_prod += nbd;
10639
10640 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10641 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10642 if we put Tx into XOFF state. */
10643 smp_mb();
555f6c78 10644 netif_tx_stop_queue(txq);
de832a55 10645 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10646 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10647 netif_tx_wake_queue(txq);
10648 }
10649 fp->tx_pkt++;
10650
10651 return NETDEV_TX_OK;
10652}
10653
bb2a0f7a 10654/* called with rtnl_lock */
10655static int bnx2x_open(struct net_device *dev)
10656{
10657 struct bnx2x *bp = netdev_priv(dev);
10658
10659 netif_carrier_off(dev);
10660
10661 bnx2x_set_power_state(bp, PCI_D0);
10662
bb2a0f7a 10663 return bnx2x_nic_load(bp, LOAD_OPEN);
10664}
10665
bb2a0f7a 10666/* called with rtnl_lock */
10667static int bnx2x_close(struct net_device *dev)
10668{
10669 struct bnx2x *bp = netdev_priv(dev);
10670
10671 /* Unload the driver, release IRQs */
10672 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10673 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10674 if (!CHIP_REV_IS_SLOW(bp))
10675 bnx2x_set_power_state(bp, PCI_D3hot);
10676
10677 return 0;
10678}
10679
f5372251 10680/* called with netif_tx_lock from dev_mcast.c */
10681static void bnx2x_set_rx_mode(struct net_device *dev)
10682{
10683 struct bnx2x *bp = netdev_priv(dev);
10684 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10685 int port = BP_PORT(bp);
10686
10687 if (bp->state != BNX2X_STATE_OPEN) {
10688 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10689 return;
10690 }
10691
10692 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10693
10694 if (dev->flags & IFF_PROMISC)
10695 rx_mode = BNX2X_RX_MODE_PROMISC;
10696
10697 else if ((dev->flags & IFF_ALLMULTI) ||
10698 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10699 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10700
10701 else { /* some multicasts */
10702 if (CHIP_IS_E1(bp)) {
10703 int i, old, offset;
10704 struct dev_mc_list *mclist;
10705 struct mac_configuration_cmd *config =
10706 bnx2x_sp(bp, mcast_config);
10707
10708 for (i = 0, mclist = dev->mc_list;
10709 mclist && (i < dev->mc_count);
10710 i++, mclist = mclist->next) {
10711
10712 config->config_table[i].
10713 cam_entry.msb_mac_addr =
10714 swab16(*(u16 *)&mclist->dmi_addr[0]);
10715 config->config_table[i].
10716 cam_entry.middle_mac_addr =
10717 swab16(*(u16 *)&mclist->dmi_addr[2]);
10718 config->config_table[i].
10719 cam_entry.lsb_mac_addr =
10720 swab16(*(u16 *)&mclist->dmi_addr[4]);
10721 config->config_table[i].cam_entry.flags =
10722 cpu_to_le16(port);
10723 config->config_table[i].
10724 target_table_entry.flags = 0;
10725 config->config_table[i].
10726 target_table_entry.client_id = 0;
10727 config->config_table[i].
10728 target_table_entry.vlan_id = 0;
10729
10730 DP(NETIF_MSG_IFUP,
10731 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10732 config->config_table[i].
10733 cam_entry.msb_mac_addr,
10734 config->config_table[i].
10735 cam_entry.middle_mac_addr,
10736 config->config_table[i].
10737 cam_entry.lsb_mac_addr);
10738 }
8d9c5f34 10739 old = config->hdr.length;
10740 if (old > i) {
10741 for (; i < old; i++) {
10742 if (CAM_IS_INVALID(config->
10743 config_table[i])) {
af246401 10744 /* already invalidated */
10745 break;
10746 }
10747 /* invalidate */
10748 CAM_INVALIDATE(config->
10749 config_table[i]);
10750 }
10751 }
10752
10753 if (CHIP_REV_IS_SLOW(bp))
10754 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10755 else
10756 offset = BNX2X_MAX_MULTICAST*(1 + port);
10757
8d9c5f34 10758 config->hdr.length = i;
34f80b04 10759 config->hdr.offset = offset;
8d9c5f34 10760 config->hdr.client_id = bp->fp->cl_id;
10761 config->hdr.reserved1 = 0;
10762
10763 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10764 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10765 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10766 0);
10767 } else { /* E1H */
10768 /* Accept one or more multicasts */
10769 struct dev_mc_list *mclist;
10770 u32 mc_filter[MC_HASH_SIZE];
10771 u32 crc, bit, regidx;
10772 int i;
10773
10774 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10775
10776 for (i = 0, mclist = dev->mc_list;
10777 mclist && (i < dev->mc_count);
10778 i++, mclist = mclist->next) {
10779
10780 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10781 mclist->dmi_addr);
10782
10783 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10784 bit = (crc >> 24) & 0xff;
10785 regidx = bit >> 5;
10786 bit &= 0x1f;
10787 mc_filter[regidx] |= (1 << bit);
10788 }
10789
10790 for (i = 0; i < MC_HASH_SIZE; i++)
10791 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10792 mc_filter[i]);
10793 }
10794 }
10795
10796 bp->rx_mode = rx_mode;
10797 bnx2x_set_storm_rx_mode(bp);
10798}
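/*
 * Editor's note: a standalone sketch of the E1H multicast hashing above.
 * A bit-at-a-time CRC-32C (Castagnoli, reflected poly 0x82F63B78) stands
 * in for crc32c_le(); seed/final-XOR conventions differ between crc32c
 * variants, but the bin selection is the point: the top CRC byte picks
 * one of 256 bits spread across eight 32-bit filter registers.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t toy_crc32c(const uint8_t *p, unsigned int len)
{
	uint32_t crc = ~0u;
	int k;

	while (len--) {
		crc ^= *p++;
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1u));
	}
	return ~crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[8];			/* MC_HASH_SIZE == 8 here */
	uint32_t crc = toy_crc32c(mac, 6);
	uint32_t bit = (crc >> 24) & 0xff;
	uint32_t regidx = bit >> 5;		/* which register */

	memset(mc_filter, 0, sizeof(mc_filter));
	bit &= 0x1f;				/* which bit within it */
	mc_filter[regidx] |= 1u << bit;
	printf("crc %08x -> reg %u, bit %u\n", crc, regidx, bit);
	return 0;
}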
10799
10800/* called with rtnl_lock */
10801static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10802{
10803 struct sockaddr *addr = p;
10804 struct bnx2x *bp = netdev_priv(dev);
10805
34f80b04 10806 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10807 return -EINVAL;
10808
10809 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10810 if (netif_running(dev)) {
10811 if (CHIP_IS_E1(bp))
3101c2bc 10812 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10813 else
3101c2bc 10814 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10815 }
10816
10817 return 0;
10818}
10819
c18487ee 10820/* called with rtnl_lock */
10821static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10822{
10823 struct mii_ioctl_data *data = if_mii(ifr);
10824 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10825 int port = BP_PORT(bp);
10826 int err;
10827
10828 switch (cmd) {
10829 case SIOCGMIIPHY:
34f80b04 10830 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10831
c14423fe 10832 /* fallthrough */
c18487ee 10833
a2fbb9ea 10834 case SIOCGMIIREG: {
c18487ee 10835 u16 mii_regval;
a2fbb9ea 10836
10837 if (!netif_running(dev))
10838 return -EAGAIN;
a2fbb9ea 10839
34f80b04 10840 mutex_lock(&bp->port.phy_mutex);
3196a88a 10841 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10842 DEFAULT_PHY_DEV_ADDR,
10843 (data->reg_num & 0x1f), &mii_regval);
10844 data->val_out = mii_regval;
34f80b04 10845 mutex_unlock(&bp->port.phy_mutex);
10846 return err;
10847 }
10848
10849 case SIOCSMIIREG:
10850 if (!capable(CAP_NET_ADMIN))
10851 return -EPERM;
10852
10853 if (!netif_running(dev))
10854 return -EAGAIN;
10855
34f80b04 10856 mutex_lock(&bp->port.phy_mutex);
3196a88a 10857 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10858 DEFAULT_PHY_DEV_ADDR,
10859 (data->reg_num & 0x1f), data->val_in);
34f80b04 10860 mutex_unlock(&bp->port.phy_mutex);
10861 return err;
10862
10863 default:
10864 /* do nothing */
10865 break;
10866 }
10867
10868 return -EOPNOTSUPP;
10869}
10870
34f80b04 10871/* called with rtnl_lock */
10872static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10873{
10874 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10875 int rc = 0;
10876
10877 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10878 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10879 return -EINVAL;
10880
10881 /* This does not race with packet allocation
c14423fe 10882 * because the actual alloc size is
10883 * only updated as part of load
10884 */
10885 dev->mtu = new_mtu;
10886
10887 if (netif_running(dev)) {
10888 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10889 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10890 }
10891
10892 return rc;
10893}
10894
10895static void bnx2x_tx_timeout(struct net_device *dev)
10896{
10897 struct bnx2x *bp = netdev_priv(dev);
10898
10899#ifdef BNX2X_STOP_ON_ERROR
10900 if (!bp->panic)
10901 bnx2x_panic();
10902#endif
10903 /* This allows the netif to be shutdown gracefully before resetting */
10904 schedule_work(&bp->reset_task);
10905}
10906
10907#ifdef BCM_VLAN
34f80b04 10908/* called with rtnl_lock */
10909static void bnx2x_vlan_rx_register(struct net_device *dev,
10910 struct vlan_group *vlgrp)
10911{
10912 struct bnx2x *bp = netdev_priv(dev);
10913
10914 bp->vlgrp = vlgrp;
10915
10916 /* Set flags according to the required capabilities */
10917 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10918
10919 if (dev->features & NETIF_F_HW_VLAN_TX)
10920 bp->flags |= HW_VLAN_TX_FLAG;
10921
10922 if (dev->features & NETIF_F_HW_VLAN_RX)
10923 bp->flags |= HW_VLAN_RX_FLAG;
10924
a2fbb9ea 10925 if (netif_running(dev))
49d66772 10926 bnx2x_set_client_config(bp);
a2fbb9ea 10927}
34f80b04 10928
10929#endif
10930
10931#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10932static void poll_bnx2x(struct net_device *dev)
10933{
10934 struct bnx2x *bp = netdev_priv(dev);
10935
10936 disable_irq(bp->pdev->irq);
10937 bnx2x_interrupt(bp->pdev->irq, dev);
10938 enable_irq(bp->pdev->irq);
10939}
10940#endif
10941
10942static const struct net_device_ops bnx2x_netdev_ops = {
10943 .ndo_open = bnx2x_open,
10944 .ndo_stop = bnx2x_close,
10945 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 10946 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10947 .ndo_set_mac_address = bnx2x_change_mac_addr,
10948 .ndo_validate_addr = eth_validate_addr,
10949 .ndo_do_ioctl = bnx2x_ioctl,
10950 .ndo_change_mtu = bnx2x_change_mtu,
10951 .ndo_tx_timeout = bnx2x_tx_timeout,
10952#ifdef BCM_VLAN
10953 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10954#endif
10955#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10956 .ndo_poll_controller = poll_bnx2x,
10957#endif
10958};
10959
10960static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10961 struct net_device *dev)
10962{
10963 struct bnx2x *bp;
10964 int rc;
10965
10966 SET_NETDEV_DEV(dev, &pdev->dev);
10967 bp = netdev_priv(dev);
10968
10969 bp->dev = dev;
10970 bp->pdev = pdev;
a2fbb9ea 10971 bp->flags = 0;
34f80b04 10972 bp->func = PCI_FUNC(pdev->devfn);
10973
10974 rc = pci_enable_device(pdev);
10975 if (rc) {
10976 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10977 goto err_out;
10978 }
10979
10980 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10981 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10982 " aborting\n");
10983 rc = -ENODEV;
10984 goto err_out_disable;
10985 }
10986
10987 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10988 printk(KERN_ERR PFX "Cannot find second PCI device"
10989 " base address, aborting\n");
10990 rc = -ENODEV;
10991 goto err_out_disable;
10992 }
10993
10994 if (atomic_read(&pdev->enable_cnt) == 1) {
10995 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10996 if (rc) {
10997 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10998 " aborting\n");
10999 goto err_out_disable;
11000 }
a2fbb9ea 11001
11002 pci_set_master(pdev);
11003 pci_save_state(pdev);
11004 }
11005
11006 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11007 if (bp->pm_cap == 0) {
11008 printk(KERN_ERR PFX "Cannot find power management"
11009 " capability, aborting\n");
11010 rc = -EIO;
11011 goto err_out_release;
11012 }
11013
11014 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11015 if (bp->pcie_cap == 0) {
11016 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11017 " aborting\n");
11018 rc = -EIO;
11019 goto err_out_release;
11020 }
11021
6a35528a 11022 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11023 bp->flags |= USING_DAC_FLAG;
6a35528a 11024 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11025 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11026 " failed, aborting\n");
11027 rc = -EIO;
11028 goto err_out_release;
11029 }
11030
284901a9 11031 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11032 printk(KERN_ERR PFX "System does not support DMA,"
11033 " aborting\n");
11034 rc = -EIO;
11035 goto err_out_release;
11036 }
11037
11038 dev->mem_start = pci_resource_start(pdev, 0);
11039 dev->base_addr = dev->mem_start;
11040 dev->mem_end = pci_resource_end(pdev, 0);
11041
11042 dev->irq = pdev->irq;
11043
275f165f 11044 bp->regview = pci_ioremap_bar(pdev, 0);
11045 if (!bp->regview) {
11046 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11047 rc = -ENOMEM;
11048 goto err_out_release;
11049 }
11050
11051 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11052 min_t(u64, BNX2X_DB_SIZE,
11053 pci_resource_len(pdev, 2)));
11054 if (!bp->doorbells) {
11055 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11056 rc = -ENOMEM;
11057 goto err_out_unmap;
11058 }
11059
11060 bnx2x_set_power_state(bp, PCI_D0);
11061
11062 /* clean indirect addresses */
11063 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11064 PCICFG_VENDOR_ID_OFFSET);
11065 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11066 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11067 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11068 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11069
34f80b04 11070 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11071
c64213cd 11072 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11073 dev->ethtool_ops = &bnx2x_ethtool_ops;
11074 dev->features |= NETIF_F_SG;
11075 dev->features |= NETIF_F_HW_CSUM;
11076 if (bp->flags & USING_DAC_FLAG)
11077 dev->features |= NETIF_F_HIGHDMA;
11078#ifdef BCM_VLAN
11079 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11080 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11081#endif
11082 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 11083 dev->features |= NETIF_F_TSO6;
11084
11085 return 0;
11086
11087err_out_unmap:
11088 if (bp->regview) {
11089 iounmap(bp->regview);
11090 bp->regview = NULL;
11091 }
11092 if (bp->doorbells) {
11093 iounmap(bp->doorbells);
11094 bp->doorbells = NULL;
11095 }
11096
11097err_out_release:
11098 if (atomic_read(&pdev->enable_cnt) == 1)
11099 pci_release_regions(pdev);
11100
11101err_out_disable:
11102 pci_disable_device(pdev);
11103 pci_set_drvdata(pdev, NULL);
11104
11105err_out:
11106 return rc;
11107}
11108
11109static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11110{
11111 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11112
11113 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11114 return val;
11115}
11116
11117/* return value of 1=2.5GHz 2=5GHz */
11118static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11119{
11120 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11121
11122 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11123 return val;
11124}
11125static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11126{
11127 struct bnx2x_fw_file_hdr *fw_hdr;
11128 struct bnx2x_fw_file_section *sections;
11129 u16 *ops_offsets;
11130 u32 offset, len, num_ops;
11131 int i;
11132 const struct firmware *firmware = bp->firmware;
 11133 const u8 *fw_ver;
11134
11135 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11136 return -EINVAL;
11137
11138 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11139 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11140
11141 /* Make sure none of the offsets and sizes make us read beyond
11142 * the end of the firmware data */
11143 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11144 offset = be32_to_cpu(sections[i].offset);
11145 len = be32_to_cpu(sections[i].len);
11146 if (offset + len > firmware->size) {
11147 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11148 return -EINVAL;
11149 }
11150 }
11151
11152 /* Likewise for the init_ops offsets */
11153 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11154 ops_offsets = (u16 *)(firmware->data + offset);
11155 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11156
11157 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
 11158 if (be16_to_cpu(ops_offsets[i]) >= num_ops) {
11159 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11160 return -EINVAL;
11161 }
11162 }
11163
11164 /* Check FW version */
11165 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11166 fw_ver = firmware->data + offset;
11167 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11168 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11169 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11170 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11171 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11172 " Should be %d.%d.%d.%d\n",
11173 fw_ver[0], fw_ver[1], fw_ver[2],
11174 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11175 BCM_5710_FW_MINOR_VERSION,
11176 BCM_5710_FW_REVISION_VERSION,
11177 BCM_5710_FW_ENGINEERING_VERSION);
11178 return -EINVAL;
11179 }
11180
11181 return 0;
11182}
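/*
 * Editor's note (a suggestion, not a report of what ships): with u32
 * operands, `offset + len > firmware->size` can wrap around and accept a
 * bogus section from a corrupt firmware file. A wrap-proof form of the
 * same test:
 */
#include <stddef.h>
#include <stdint.h>

static int section_in_bounds(uint32_t offset, uint32_t len, size_t fw_size)
{
	return len <= fw_size && offset <= fw_size - len;
}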
11183
11184static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11185{
11186 u32 i;
11187 const __be32 *source = (const __be32*)_source;
11188 u32 *target = (u32*)_target;
11189
11190 for (i = 0; i < n/4; i++)
11191 target[i] = be32_to_cpu(source[i]);
11192}
11193
11194/*
11195 Ops array is stored in the following format:
11196 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11197 */
11198static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11199{
11200 u32 i, j, tmp;
11201 const __be32 *source = (const __be32*)_source;
11202 struct raw_op *target = (struct raw_op*)_target;
11203
11204 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11205 tmp = be32_to_cpu(source[j]);
11206 target[i].op = (tmp >> 24) & 0xff;
11207 target[i].offset = tmp & 0xffffff;
11208 target[i].raw_data = be32_to_cpu(source[j+1]);
11209 }
11210}
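/*
 * Editor's note: a standalone decode of one 8-byte init-ops record per
 * the format comment above ({op(8bit), offset(24bit), data(32bit)}, all
 * big endian). The record bytes are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t rec[8] = { 0x02, 0x01, 0x23, 0x45,   /* op + offset */
				 0x00, 0x00, 0xbe, 0xef }; /* raw data */
	uint32_t w0 = ((uint32_t)rec[0] << 24) | (rec[1] << 16) |
		      (rec[2] << 8) | rec[3];
	uint32_t data = ((uint32_t)rec[4] << 24) | (rec[5] << 16) |
			(rec[6] << 8) | rec[7];

	/* prints: op 02 offset 012345 data 0000beef */
	printf("op %02x offset %06x data %08x\n",
	       (w0 >> 24) & 0xff, w0 & 0xffffff, data);
	return 0;
}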
11211static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11212{
11213 u32 i;
11214 u16 *target = (u16*)_target;
11215 const __be16 *source = (const __be16*)_source;
11216
11217 for (i = 0; i < n/2; i++)
11218 target[i] = be16_to_cpu(source[i]);
11219}
11220
11221#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11222 do { \
11223 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11224 bp->arr = kmalloc(len, GFP_KERNEL); \
11225 if (!bp->arr) { \
11226 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11227 goto lbl; \
11228 } \
11229 func(bp->firmware->data + \
11230 be32_to_cpu(fw_hdr->arr.offset), \
11231 (u8*)bp->arr, len); \
11232 } while (0)
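/*
 * Editor's note: hand-expanded for readability, the invocation
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * used below becomes:
 *
 *	do {
 *		u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *		bp->init_data = kmalloc(len, GFP_KERNEL);
 *		if (!bp->init_data) {
 *			printk(KERN_ERR PFX "Failed to allocate %d bytes for init_data\n", len);
 *			goto request_firmware_exit;
 *		}
 *		be32_to_cpu_n(bp->firmware->data +
 *			      be32_to_cpu(fw_hdr->init_data.offset),
 *			      (u8 *)bp->init_data, len);
 *	} while (0);
 */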
11233
11234
11235static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11236{
11237 char fw_file_name[40] = {0};
11238 int rc, offset;
11239 struct bnx2x_fw_file_hdr *fw_hdr;
11240
11241 /* Create a FW file name */
11242 if (CHIP_IS_E1(bp))
11243 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11244 else
11245 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11246
11247 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11248 BCM_5710_FW_MAJOR_VERSION,
11249 BCM_5710_FW_MINOR_VERSION,
11250 BCM_5710_FW_REVISION_VERSION,
11251 BCM_5710_FW_ENGINEERING_VERSION);
11252
11253 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11254
11255 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11256 if (rc) {
11257 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11258 goto request_firmware_exit;
11259 }
11260
11261 rc = bnx2x_check_firmware(bp);
11262 if (rc) {
11263 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11264 goto request_firmware_exit;
11265 }
11266
11267 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11268
11269 /* Initialize the pointers to the init arrays */
11270 /* Blob */
11271 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11272
11273 /* Opcodes */
11274 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11275
11276 /* Offsets */
11277 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11278
11279 /* STORMs firmware */
11280 bp->tsem_int_table_data = bp->firmware->data +
11281 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11282 bp->tsem_pram_data = bp->firmware->data +
11283 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11284 bp->usem_int_table_data = bp->firmware->data +
11285 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11286 bp->usem_pram_data = bp->firmware->data +
11287 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11288 bp->xsem_int_table_data = bp->firmware->data +
11289 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11290 bp->xsem_pram_data = bp->firmware->data +
11291 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11292 bp->csem_int_table_data = bp->firmware->data +
11293 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11294 bp->csem_pram_data = bp->firmware->data +
11295 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11296
11297 return 0;
11298init_offsets_alloc_err:
11299 kfree(bp->init_ops);
11300init_ops_alloc_err:
11301 kfree(bp->init_data);
11302request_firmware_exit:
11303 release_firmware(bp->firmware);
11304
11305 return rc;
11306}
11307
11308
25047950 11309
11310static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11311 const struct pci_device_id *ent)
11312{
11313 static int version_printed;
11314 struct net_device *dev = NULL;
11315 struct bnx2x *bp;
25047950 11316 int rc;
11317
11318 if (version_printed++ == 0)
11319 printk(KERN_INFO "%s", version);
11320
11321 /* dev zeroed in init_etherdev */
555f6c78 11322 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11323 if (!dev) {
11324 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11325 return -ENOMEM;
34f80b04 11326 }
a2fbb9ea 11327
11328 bp = netdev_priv(dev);
11329 bp->msglevel = debug;
11330
34f80b04 11331 rc = bnx2x_init_dev(pdev, dev);
11332 if (rc < 0) {
11333 free_netdev(dev);
11334 return rc;
11335 }
11336
11337 pci_set_drvdata(pdev, dev);
11338
34f80b04 11339 rc = bnx2x_init_bp(bp);
11340 if (rc)
11341 goto init_one_exit;
11342
11343 /* Set init arrays */
11344 rc = bnx2x_init_firmware(bp, &pdev->dev);
11345 if (rc) {
11346 printk(KERN_ERR PFX "Error loading firmware\n");
11347 goto init_one_exit;
11348 }
11349
693fc0d1 11350 rc = register_netdev(dev);
34f80b04 11351 if (rc) {
693fc0d1 11352 dev_err(&pdev->dev, "Cannot register net device\n");
11353 goto init_one_exit;
11354 }
11355
25047950 11356 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11357 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11358 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11359 bnx2x_get_pcie_width(bp),
11360 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11361 dev->base_addr, bp->pdev->irq);
e174961c 11362 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 11363
a2fbb9ea 11364 return 0;
11365
11366init_one_exit:
11367 if (bp->regview)
11368 iounmap(bp->regview);
11369
11370 if (bp->doorbells)
11371 iounmap(bp->doorbells);
11372
11373 free_netdev(dev);
11374
11375 if (atomic_read(&pdev->enable_cnt) == 1)
11376 pci_release_regions(pdev);
11377
11378 pci_disable_device(pdev);
11379 pci_set_drvdata(pdev, NULL);
11380
11381 return rc;
11382}
11383
11384static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11385{
11386 struct net_device *dev = pci_get_drvdata(pdev);
11387 struct bnx2x *bp;
11388
11389 if (!dev) {
11390 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11391 return;
11392 }
228241eb 11393 bp = netdev_priv(dev);
a2fbb9ea 11394
11395 unregister_netdev(dev);
11396
11397 kfree(bp->init_ops_offsets);
11398 kfree(bp->init_ops);
11399 kfree(bp->init_data);
11400 release_firmware(bp->firmware);
11401
11402 if (bp->regview)
11403 iounmap(bp->regview);
11404
11405 if (bp->doorbells)
11406 iounmap(bp->doorbells);
11407
11408 free_netdev(dev);
11409
11410 if (atomic_read(&pdev->enable_cnt) == 1)
11411 pci_release_regions(pdev);
11412
11413 pci_disable_device(pdev);
11414 pci_set_drvdata(pdev, NULL);
11415}
11416
11417static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11418{
11419 struct net_device *dev = pci_get_drvdata(pdev);
11420 struct bnx2x *bp;
11421
11422 if (!dev) {
11423 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11424 return -ENODEV;
11425 }
11426 bp = netdev_priv(dev);
a2fbb9ea 11427
34f80b04 11428 rtnl_lock();
a2fbb9ea 11429
34f80b04 11430 pci_save_state(pdev);
228241eb 11431
11432 if (!netif_running(dev)) {
11433 rtnl_unlock();
11434 return 0;
11435 }
11436
11437 netif_device_detach(dev);
a2fbb9ea 11438
da5a662a 11439 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11440
a2fbb9ea 11441 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11442
11443 rtnl_unlock();
11444
11445 return 0;
11446}
11447
11448static int bnx2x_resume(struct pci_dev *pdev)
11449{
11450 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11451 struct bnx2x *bp;
a2fbb9ea
ET
11452 int rc;
11453
11454 if (!dev) {
11455 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11456 return -ENODEV;
11457 }
228241eb 11458 bp = netdev_priv(dev);
a2fbb9ea 11459
11460 rtnl_lock();
11461
228241eb 11462 pci_restore_state(pdev);
11463
11464 if (!netif_running(dev)) {
11465 rtnl_unlock();
11466 return 0;
11467 }
11468
11469 bnx2x_set_power_state(bp, PCI_D0);
11470 netif_device_attach(dev);
11471
da5a662a 11472 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11473
11474 rtnl_unlock();
11475
11476 return rc;
11477}
11478
11479static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11480{
11481 int i;
11482
11483 bp->state = BNX2X_STATE_ERROR;
11484
11485 bp->rx_mode = BNX2X_RX_MODE_NONE;
11486
11487 bnx2x_netif_stop(bp, 0);
11488
11489 del_timer_sync(&bp->timer);
11490 bp->stats_state = STATS_STATE_DISABLED;
11491 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11492
11493 /* Release IRQs */
11494 bnx2x_free_irq(bp);
11495
11496 if (CHIP_IS_E1(bp)) {
11497 struct mac_configuration_cmd *config =
11498 bnx2x_sp(bp, mcast_config);
11499
8d9c5f34 11500 for (i = 0; i < config->hdr.length; i++)
11501 CAM_INVALIDATE(config->config_table[i]);
11502 }
11503
11504 /* Free SKBs, SGEs, TPA pool and driver internals */
11505 bnx2x_free_skbs(bp);
555f6c78 11506 for_each_rx_queue(bp, i)
f8ef6e44 11507 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11508 for_each_rx_queue(bp, i)
7cde1c8b 11509 netif_napi_del(&bnx2x_fp(bp, i, napi));
11510 bnx2x_free_mem(bp);
11511
11512 bp->state = BNX2X_STATE_CLOSED;
11513
11514 netif_carrier_off(bp->dev);
11515
11516 return 0;
11517}
11518
11519static void bnx2x_eeh_recover(struct bnx2x *bp)
11520{
11521 u32 val;
11522
11523 mutex_init(&bp->port.phy_mutex);
11524
11525 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11526 bp->link_params.shmem_base = bp->common.shmem_base;
11527 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11528
11529 if (!bp->common.shmem_base ||
11530 (bp->common.shmem_base < 0xA0000) ||
11531 (bp->common.shmem_base >= 0xC0000)) {
11532 BNX2X_DEV_INFO("MCP not active\n");
11533 bp->flags |= NO_MCP_FLAG;
11534 return;
11535 }
11536
11537 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11538 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11539 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11540 BNX2X_ERR("BAD MCP validity signature\n");
11541
11542 if (!BP_NOMCP(bp)) {
11543 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11544 & DRV_MSG_SEQ_NUMBER_MASK);
11545 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11546 }
11547}
11548
11549/**
11550 * bnx2x_io_error_detected - called when PCI error is detected
11551 * @pdev: Pointer to PCI device
11552 * @state: The current pci connection state
11553 *
11554 * This function is called after a PCI bus error affecting
11555 * this device has been detected.
11556 */
11557static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11558 pci_channel_state_t state)
11559{
11560 struct net_device *dev = pci_get_drvdata(pdev);
11561 struct bnx2x *bp = netdev_priv(dev);
11562
11563 rtnl_lock();
11564
11565 netif_device_detach(dev);
11566
11567 if (netif_running(dev))
f8ef6e44 11568 bnx2x_eeh_nic_unload(bp);
11569
11570 pci_disable_device(pdev);
11571
11572 rtnl_unlock();
11573
11574 /* Request a slot reset */
11575 return PCI_ERS_RESULT_NEED_RESET;
11576}
11577
11578/**
11579 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11580 * @pdev: Pointer to PCI device
11581 *
11582 * Restart the card from scratch, as if from a cold-boot.
11583 */
11584static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11585{
11586 struct net_device *dev = pci_get_drvdata(pdev);
11587 struct bnx2x *bp = netdev_priv(dev);
11588
11589 rtnl_lock();
11590
11591 if (pci_enable_device(pdev)) {
11592 dev_err(&pdev->dev,
11593 "Cannot re-enable PCI device after reset\n");
11594 rtnl_unlock();
11595 return PCI_ERS_RESULT_DISCONNECT;
11596 }
11597
11598 pci_set_master(pdev);
11599 pci_restore_state(pdev);
11600
11601 if (netif_running(dev))
11602 bnx2x_set_power_state(bp, PCI_D0);
11603
11604 rtnl_unlock();
11605
11606 return PCI_ERS_RESULT_RECOVERED;
11607}
11608
11609/**
11610 * bnx2x_io_resume - called when traffic can start flowing again
11611 * @pdev: Pointer to PCI device
11612 *
11613 * This callback is called when the error recovery driver tells us that
11614 * its OK to resume normal operation.
11615 */
11616static void bnx2x_io_resume(struct pci_dev *pdev)
11617{
11618 struct net_device *dev = pci_get_drvdata(pdev);
11619 struct bnx2x *bp = netdev_priv(dev);
11620
11621 rtnl_lock();
11622
11623 bnx2x_eeh_recover(bp);
11624
493adb1f 11625 if (netif_running(dev))
f8ef6e44 11626 bnx2x_nic_load(bp, LOAD_NORMAL);
11627
11628 netif_device_attach(dev);
11629
11630 rtnl_unlock();
11631}
11632
11633static struct pci_error_handlers bnx2x_err_handler = {
11634 .error_detected = bnx2x_io_error_detected,
11635 .slot_reset = bnx2x_io_slot_reset,
11636 .resume = bnx2x_io_resume,
11637};
11638
a2fbb9ea 11639static struct pci_driver bnx2x_pci_driver = {
11640 .name = DRV_MODULE_NAME,
11641 .id_table = bnx2x_pci_tbl,
11642 .probe = bnx2x_init_one,
11643 .remove = __devexit_p(bnx2x_remove_one),
11644 .suspend = bnx2x_suspend,
11645 .resume = bnx2x_resume,
11646 .err_handler = &bnx2x_err_handler,
11647};
11648
11649static int __init bnx2x_init(void)
11650{
11651 int ret;
11652
11653 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11654 if (bnx2x_wq == NULL) {
11655 printk(KERN_ERR PFX "Cannot create workqueue\n");
11656 return -ENOMEM;
11657 }
11658
11659 ret = pci_register_driver(&bnx2x_pci_driver);
11660 if (ret) {
11661 printk(KERN_ERR PFX "Cannot register driver\n");
11662 destroy_workqueue(bnx2x_wq);
11663 }
11664 return ret;
11665}
11666
11667static void __exit bnx2x_cleanup(void)
11668{
11669 pci_unregister_driver(&bnx2x_pci_driver);
11670
11671 destroy_workqueue(bnx2x_wq);
11672}
11673
11674module_init(bnx2x_init);
11675module_exit(bnx2x_cleanup);
11676
94a78b79 11677