bnx2x: Calling napi_del
[deliverable/linux.git] drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.24"
#define DRV_MODULE_RELDATE	"2009/01/14"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif
static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

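/* Walk the XSTORM/TSTORM/CSTORM/USTORM assert lists in internal memory
   and print every valid entry; returns the number of asserts found */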
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

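/* Read the firmware trace mark from the MCP scratchpad and print the
   trace buffer contents around it */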
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

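/* Dump driver state on panic: per-queue producers/consumers, the
   TX/RX/SGE/CQE rings, the firmware trace and any STORM asserts */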
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

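/* Enable interrupts in the HC: program the single-ISR/MSI-X/INT#A mode
   bits plus the attention bit, and on E1H also the leading/trailing edge
   registers */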
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

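/* Mask interrupts and wait until all ISRs and the slowpath task have
   finished running */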
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

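/* Refresh the cached CSTORM/USTORM indices from the fastpath status
   block; returns a bitmask of which indices changed */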
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return ((fp->tx_pkt_prod != tx_cons_sb) ||
		(fp->tx_pkt_prod != fp->tx_pkt_cons));
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

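/* Number of TX BDs still available to start_xmit() */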
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

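/* TX completion: walk the consumer up to the status-block index, free
   the completed skbs and wake the queue if it was stopped */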
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}


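/* Handle a slowpath (ramrod) completion CQE: advance the per-fastpath
   or global state machine according to the completed command */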
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

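/* Start a TPA aggregation: park the partially filled skb in the
   per-queue pool and give its ring slot a fresh skb from that pool */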
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

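/* Attach the SGE pages listed in the CQE to the skb as page fragments,
   replacing each consumed page in the SGE ring */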
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

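/* End a TPA aggregation: fix up the aggregated skb (IP checksum, page
   frags, VLAN) and pass it to the stack, then rearm the bin */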
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

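/* RX completion: process up to budget CQEs - slowpath events, TPA
   start/stop and regular packets - then update the ring producers */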
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

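/* MSI-X fastpath ISR: ack the status block and schedule NAPI for this
   queue */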
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

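/* INT#A/shared ISR: ack the interrupt and demultiplex the status bits
   to NAPI and to the slowpath task */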
1655static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1656{
1657 struct net_device *dev = dev_instance;
1658 struct bnx2x *bp = netdev_priv(dev);
1659 u16 status = bnx2x_ack_int(bp);
34f80b04 1660 u16 mask;
a2fbb9ea 1661
34f80b04 1662 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1663 if (unlikely(status == 0)) {
1664 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1665 return IRQ_NONE;
1666 }
34f80b04 1667 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1668
34f80b04 1669 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1670 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1671 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1672 return IRQ_HANDLED;
1673 }
1674
3196a88a
EG
1675#ifdef BNX2X_STOP_ON_ERROR
1676 if (unlikely(bp->panic))
1677 return IRQ_HANDLED;
1678#endif
1679
34f80b04
EG
1680 mask = 0x2 << bp->fp[0].sb_id;
1681 if (status & mask) {
a2fbb9ea
ET
1682 struct bnx2x_fastpath *fp = &bp->fp[0];
1683
1684 prefetch(fp->rx_cons_sb);
1685 prefetch(fp->tx_cons_sb);
1686 prefetch(&fp->status_blk->c_status_block.status_block_index);
1687 prefetch(&fp->status_blk->u_status_block.status_block_index);
1688
908a7a16 1689 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1690
34f80b04 1691 status &= ~mask;
a2fbb9ea
ET
1692 }
1693
a2fbb9ea 1694
34f80b04 1695 if (unlikely(status & 0x1)) {
1cf167f2 1696 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1697
1698 status &= ~0x1;
1699 if (!status)
1700 return IRQ_HANDLED;
1701 }
1702
34f80b04
EG
1703 if (status)
1704 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1705 status);
a2fbb9ea 1706
c18487ee 1707 return IRQ_HANDLED;
a2fbb9ea
ET
1708}
1709
c18487ee 1710/* end of fast path */
a2fbb9ea 1711
bb2a0f7a 1712static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1713
c18487ee
YR
1714/* Link */
1715
1716/*
1717 * General service functions
1718 */
a2fbb9ea 1719
4a37fb66 1720static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1721{
1722 u32 lock_status;
1723 u32 resource_bit = (1 << resource);
4a37fb66
YG
1724 int func = BP_FUNC(bp);
1725 u32 hw_lock_control_reg;
c18487ee 1726 int cnt;
a2fbb9ea 1727
c18487ee
YR
1728 /* Validating that the resource is within range */
1729 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1730 DP(NETIF_MSG_HW,
1731 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1732 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1733 return -EINVAL;
1734 }
a2fbb9ea 1735
4a37fb66
YG
1736 if (func <= 5) {
1737 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1738 } else {
1739 hw_lock_control_reg =
1740 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1741 }
1742
c18487ee 1743 /* Validating that the resource is not already taken */
4a37fb66 1744 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1745 if (lock_status & resource_bit) {
1746 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1747 lock_status, resource_bit);
1748 return -EEXIST;
1749 }
a2fbb9ea 1750
46230476
EG
1751 /* Try for 5 second every 5ms */
1752 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1753 /* Try to acquire the lock */
4a37fb66
YG
1754 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1755 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1756 if (lock_status & resource_bit)
1757 return 0;
a2fbb9ea 1758
c18487ee 1759 msleep(5);
a2fbb9ea 1760 }
c18487ee
YR
1761 DP(NETIF_MSG_HW, "Timeout\n");
1762 return -EAGAIN;
1763}
a2fbb9ea 1764
4a37fb66 1765static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1766{
1767 u32 lock_status;
1768 u32 resource_bit = (1 << resource);
4a37fb66
YG
1769 int func = BP_FUNC(bp);
1770 u32 hw_lock_control_reg;
a2fbb9ea 1771
c18487ee
YR
1772 /* Validating that the resource is within range */
1773 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1774 DP(NETIF_MSG_HW,
1775 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1776 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1777 return -EINVAL;
1778 }
1779
4a37fb66
YG
1780 if (func <= 5) {
1781 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1782 } else {
1783 hw_lock_control_reg =
1784 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1785 }
1786
c18487ee 1787 /* Validating that the resource is currently taken */
4a37fb66 1788 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1789 if (!(lock_status & resource_bit)) {
1790 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1791 lock_status, resource_bit);
1792 return -EFAULT;
a2fbb9ea
ET
1793 }
1794
4a37fb66 1795 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1796 return 0;
1797}
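
A minimal usage sketch of the acquire/release pair above (not taken from this file; the resource ID and surrounding logic are illustrative). The acquire helper polls the per-function DRIVER_CONTROL register for up to 5 seconds and can fail with -EINVAL, -EEXIST or -EAGAIN, so callers must check it:

static int bnx2x_example_locked_access(struct bnx2x *bp)
{
	/* may fail with -EINVAL, -EEXIST or -EAGAIN (timeout) */
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	if (rc)
		return rc;
	/* ... touch the shared resource here ... */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	return 0;
}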
1798
1799/* HW Lock for shared dual port PHYs */
4a37fb66 1800static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1801{
1802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1803
34f80b04 1804 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1805
c18487ee
YR
1806 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1807 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1808 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1809}
a2fbb9ea 1810
4a37fb66 1811static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1812{
1813 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1814
c18487ee
YR
1815 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1816 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1817 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1818
34f80b04 1819 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1820}
a2fbb9ea 1821
17de50b7 1822int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1823{
1824 /* The GPIO should be swapped if swap register is set and active */
1825 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1826 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1827 int gpio_shift = gpio_num +
1828 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1829 u32 gpio_mask = (1 << gpio_shift);
1830 u32 gpio_reg;
a2fbb9ea 1831
c18487ee
YR
1832 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1833 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1834 return -EINVAL;
1835 }
a2fbb9ea 1836
4a37fb66 1837 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1838 /* read GPIO and mask except the float bits */
1839 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1840
c18487ee
YR
1841 switch (mode) {
1842 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1843 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1844 gpio_num, gpio_shift);
1845 /* clear FLOAT and set CLR */
1846 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1847 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1848 break;
a2fbb9ea 1849
c18487ee
YR
1850 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1851 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1852 gpio_num, gpio_shift);
1853 /* clear FLOAT and set SET */
1854 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1855 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1856 break;
a2fbb9ea 1857
17de50b7 1858 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1859 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1860 gpio_num, gpio_shift);
1861 /* set FLOAT */
1862 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1863 break;
a2fbb9ea 1864
c18487ee
YR
1865 default:
1866 break;
a2fbb9ea
ET
1867 }
1868
c18487ee 1869 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1870 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1871
c18487ee 1872 return 0;
a2fbb9ea
ET
1873}
1874
c18487ee 1875static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1876{
c18487ee
YR
1877 u32 spio_mask = (1 << spio_num);
1878 u32 spio_reg;
a2fbb9ea 1879
c18487ee
YR
1880 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1881 (spio_num > MISC_REGISTERS_SPIO_7)) {
1882 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1883 return -EINVAL;
a2fbb9ea
ET
1884 }
1885
4a37fb66 1886 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1887 /* read SPIO and mask except the float bits */
1888 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1889
c18487ee 1890 switch (mode) {
6378c025 1891 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1892 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1893 /* clear FLOAT and set CLR */
1894 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1895 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1896 break;
a2fbb9ea 1897
6378c025 1898 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1899 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1900 /* clear FLOAT and set SET */
1901 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1902 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1903 break;
a2fbb9ea 1904
c18487ee
YR
1905 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1906 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1907 /* set FLOAT */
1908 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1909 break;
a2fbb9ea 1910
c18487ee
YR
1911 default:
1912 break;
a2fbb9ea
ET
1913 }
1914
c18487ee 1915 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1916 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1917
a2fbb9ea
ET
1918 return 0;
1919}
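
Both helpers above use the same read-modify-write pattern on the MISC register: clear the pin's FLOAT bit so it is actively driven, then latch either CLR (output low) or SET (output high). A self-contained sketch of that bit manipulation; the _POS values here are illustrative assumptions, not the real register layout:

#include <stdio.h>

#define FLOAT_POS 24	/* illustrative positions, not the real layout */
#define CLR_POS   16

int main(void)
{
	unsigned int reg  = 0xff000000;	/* pretend all eight pins float */
	unsigned int mask = 1 << 2;	/* drive pin 2 low */

	reg &= ~(mask << FLOAT_POS);	/* clear FLOAT: actively drive the pin */
	reg |=  (mask << CLR_POS);	/* latch CLR: output low */

	printf("reg = 0x%08x\n", reg);	/* 0xfb040000 */
	return 0;
}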
1920
c18487ee 1921static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1922{
ad33ea3a
EG
1923 switch (bp->link_vars.ieee_fc &
1924 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1925 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1926 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1927 ADVERTISED_Pause);
1928 break;
1929 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1930 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1931 ADVERTISED_Pause);
1932 break;
1933 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1934 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1935 break;
1936 default:
34f80b04 1937 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1938 ADVERTISED_Pause);
1939 break;
1940 }
1941}
f1410647 1942
c18487ee
YR
1943static void bnx2x_link_report(struct bnx2x *bp)
1944{
1945 if (bp->link_vars.link_up) {
1946 if (bp->state == BNX2X_STATE_OPEN)
1947 netif_carrier_on(bp->dev);
1948 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1949
c18487ee 1950 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1951
c18487ee
YR
1952 if (bp->link_vars.duplex == DUPLEX_FULL)
1953 printk("full duplex");
1954 else
1955 printk("half duplex");
f1410647 1956
c0700f90
DM
1957 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1958 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1959 printk(", receive ");
c0700f90 1960 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1961 printk("& transmit ");
1962 } else {
1963 printk(", transmit ");
1964 }
1965 printk("flow control ON");
1966 }
1967 printk("\n");
f1410647 1968
c18487ee
YR
1969 } else { /* link_down */
1970 netif_carrier_off(bp->dev);
1971 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1972 }
c18487ee
YR
1973}
1974
1975static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1976{
19680c48
EG
1977 if (!BP_NOMCP(bp)) {
1978 u8 rc;
a2fbb9ea 1979
19680c48 1980 /* Initialize link parameters structure variables */
8c99e7b0
YR
1981 /* It is recommended to turn off RX FC for jumbo frames
1982 for better performance */
1983 if (IS_E1HMF(bp))
c0700f90 1984 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 1985 else if (bp->dev->mtu > 5000)
c0700f90 1986 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1987 else
c0700f90 1988 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1989
4a37fb66 1990 bnx2x_acquire_phy_lock(bp);
19680c48 1991 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1992 bnx2x_release_phy_lock(bp);
a2fbb9ea 1993
3c96c68b
EG
1994 bnx2x_calc_fc_adv(bp);
1995
19680c48
EG
1996 if (bp->link_vars.link_up)
1997 bnx2x_link_report(bp);
a2fbb9ea 1998
34f80b04 1999
19680c48
EG
2000 return rc;
2001 }
 2002 BNX2X_ERR("Bootcode is missing - not initializing link\n");
2003 return -EINVAL;
a2fbb9ea
ET
2004}
2005
c18487ee 2006static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2007{
19680c48 2008 if (!BP_NOMCP(bp)) {
4a37fb66 2009 bnx2x_acquire_phy_lock(bp);
19680c48 2010 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2011 bnx2x_release_phy_lock(bp);
a2fbb9ea 2012
19680c48
EG
2013 bnx2x_calc_fc_adv(bp);
2014 } else
 2015 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 2016}
a2fbb9ea 2017
c18487ee
YR
2018static void bnx2x__link_reset(struct bnx2x *bp)
2019{
19680c48 2020 if (!BP_NOMCP(bp)) {
4a37fb66 2021 bnx2x_acquire_phy_lock(bp);
19680c48 2022 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2023 bnx2x_release_phy_lock(bp);
19680c48
EG
2024 } else
 2025 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2026}
a2fbb9ea 2027
c18487ee
YR
2028static u8 bnx2x_link_test(struct bnx2x *bp)
2029{
2030 u8 rc;
a2fbb9ea 2031
4a37fb66 2032 bnx2x_acquire_phy_lock(bp);
c18487ee 2033 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2034 bnx2x_release_phy_lock(bp);
a2fbb9ea 2035
c18487ee
YR
2036 return rc;
2037}
a2fbb9ea 2038
34f80b04
EG
2039/* Calculates the sum of vn_min_rates.
2040 It's needed for further normalizing of the min_rates.
2041
2042 Returns:
2043 sum of vn_min_rates
2044 or
2045 0 - if all the min_rates are 0.
33471629 2046 In the latter case the fairness algorithm should be deactivated.
34f80b04
EG
2047 If not all min_rates are zero then those that are zeroes will
2048 be set to 1.
2049 */
2050static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2051{
2052 int i, port = BP_PORT(bp);
2053 u32 wsum = 0;
2054 int all_zero = 1;
2055
2056 for (i = 0; i < E1HVN_MAX; i++) {
2057 u32 vn_cfg =
2058 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2059 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2060 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2061 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2062 /* If min rate is zero - set it to 1 */
2063 if (!vn_min_rate)
2064 vn_min_rate = DEF_MIN_RATE;
2065 else
2066 all_zero = 0;
2067
2068 wsum += vn_min_rate;
2069 }
2070 }
2071
2072 /* ... only if all min rates are zeros - disable FAIRNESS */
2073 if (all_zero)
2074 return 0;
2075
2076 return wsum;
2077}
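
A self-contained sketch of the normalization above, with illustrative per-VN values and DEF_MIN_RATE assumed to be 100 (an assumption; the real value lives in the driver headers). Any zero rate is bumped to the default so every active VN gets a nonzero weight:

#include <stdio.h>

#define DEF_MIN_RATE 100	/* assumption, not the driver's actual value */

int main(void)
{
	unsigned int vn_min_rate[4] = { 0, 2500, 0, 4000 };	/* illustrative */
	unsigned int wsum = 0;
	int i, all_zero = 1;

	for (i = 0; i < 4; i++) {
		if (vn_min_rate[i])
			all_zero = 0;
		wsum += vn_min_rate[i] ? vn_min_rate[i] : DEF_MIN_RATE;
	}
	printf("wsum = %u\n", all_zero ? 0 : wsum);	/* 6700 here */
	return 0;
}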
2078
2079static void bnx2x_init_port_minmax(struct bnx2x *bp,
2080 int en_fness,
2081 u16 port_rate,
2082 struct cmng_struct_per_port *m_cmng_port)
2083{
2084 u32 r_param = port_rate / 8;
2085 int port = BP_PORT(bp);
2086 int i;
2087
2088 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2089
2090 /* Enable minmax only if we are in e1hmf mode */
2091 if (IS_E1HMF(bp)) {
2092 u32 fair_periodic_timeout_usec;
2093 u32 t_fair;
2094
2095 /* Enable rate shaping and fairness */
2096 m_cmng_port->flags.cmng_vn_enable = 1;
2097 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2098 m_cmng_port->flags.rate_shaping_enable = 1;
2099
2100 if (!en_fness)
 2101 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2102 " fairness will be disabled\n");
2103
2104 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2105 m_cmng_port->rs_vars.rs_periodic_timeout =
2106 RS_PERIODIC_TIMEOUT_USEC / 4;
2107
 2108 /* this is the threshold below which no timer arming will occur;
 2109 the 1.25 coefficient makes the threshold a little bigger than
 2110 the real time, to compensate for timer inaccuracy */
2111 m_cmng_port->rs_vars.rs_threshold =
2112 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2113
2114 /* resolution of fairness timer */
2115 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2116 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2117 t_fair = T_FAIR_COEF / port_rate;
2118
2119 /* this is the threshold below which we won't arm
2120 the timer anymore */
2121 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2122
 2123 /* we multiply by 1e3/8 to get bytes/msec.
 2124 We don't want the credits to exceed
 2125 T_FAIR*FAIR_MEM (the algorithm resolution) */
2126 m_cmng_port->fair_vars.upper_bound =
2127 r_param * t_fair * FAIR_MEM;
2128 /* since each tick is 4 usec */
2129 m_cmng_port->fair_vars.fairness_timeout =
2130 fair_periodic_timeout_usec / 4;
2131
2132 } else {
2133 /* Disable rate shaping and fairness */
2134 m_cmng_port->flags.cmng_vn_enable = 0;
2135 m_cmng_port->flags.fairness_enable = 0;
2136 m_cmng_port->flags.rate_shaping_enable = 0;
2137
2138 DP(NETIF_MSG_IFUP,
2139 "Single function mode minmax will be disabled\n");
2140 }
2141
2142 /* Store it to internal memory */
2143 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2144 REG_WR(bp, BAR_XSTRORM_INTMEM +
2145 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2146 ((u32 *)(m_cmng_port))[i]);
2147}
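
Worked numbers for the timer conversions above, following only what the comments state (4 usec per SDM tick, a 100 usec rate-shaping period, and a t_fair of 1000 usec on a 10G link; the T_FAIR_COEF value is inferred from that last relation and is an assumption):

#include <stdio.h>

int main(void)
{
	unsigned int port_rate = 10000;			/* Mbps, a 10G link */
	unsigned int r_param = port_rate / 8;		/* 1250, ~bytes per usec */
	unsigned int rs_ticks = 100 / 4;		/* 25 SDM ticks per 100 usec */
	unsigned int t_fair = 10000000 / port_rate;	/* 1000 usec at 10G */

	printf("r_param=%u rs_ticks=%u t_fair=%u\n", r_param, rs_ticks, t_fair);
	return 0;
}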
2148
2149static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2150 u32 wsum, u16 port_rate,
2151 struct cmng_struct_per_port *m_cmng_port)
2152{
2153 struct rate_shaping_vars_per_vn m_rs_vn;
2154 struct fairness_vars_per_vn m_fair_vn;
2155 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2156 u16 vn_min_rate, vn_max_rate;
2157 int i;
2158
2159 /* If function is hidden - set min and max to zeroes */
2160 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2161 vn_min_rate = 0;
2162 vn_max_rate = 0;
2163
2164 } else {
2165 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2166 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2167 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2168 if current min rate is zero - set it to 1.
33471629 2169 This is a requirement of the algorithm. */
34f80b04
EG
2170 if ((vn_min_rate == 0) && wsum)
2171 vn_min_rate = DEF_MIN_RATE;
2172 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2173 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2174 }
2175
2176 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2177 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2178
2179 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2180 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2181
2182 /* global vn counter - maximal Mbps for this vn */
2183 m_rs_vn.vn_counter.rate = vn_max_rate;
2184
2185 /* quota - number of bytes transmitted in this period */
2186 m_rs_vn.vn_counter.quota =
2187 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2188
2189#ifdef BNX2X_PER_PROT_QOS
2190 /* per protocol counter */
2191 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2192 /* maximal Mbps for this protocol */
2193 m_rs_vn.protocol_counters[protocol].rate =
2194 protocol_max_rate[protocol];
2195 /* the quota in each timer period -
2196 number of bytes transmitted in this period */
2197 m_rs_vn.protocol_counters[protocol].quota =
2198 (u32)(rs_periodic_timeout_usec *
2199 ((double)m_rs_vn.
2200 protocol_counters[protocol].rate/8));
2201 }
2202#endif
2203
2204 if (wsum) {
2205 /* credit for each period of the fairness algorithm:
 2206 number of bytes in T_FAIR (the VNs share the port rate).
 2207 wsum should not be larger than 10000, thus
 2208 T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2209 m_fair_vn.vn_credit_delta =
2210 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2211 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2212 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2213 m_fair_vn.vn_credit_delta);
2214 }
2215
2216#ifdef BNX2X_PER_PROT_QOS
2217 do {
2218 u32 protocolWeightSum = 0;
2219
2220 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2221 protocolWeightSum +=
2222 drvInit.protocol_min_rate[protocol];
2223 /* per protocol counter -
2224 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2225 if (protocolWeightSum > 0) {
2226 for (protocol = 0;
2227 protocol < NUM_OF_PROTOCOLS; protocol++)
2228 /* credit for each period of the
2229 fairness algorithm - number of bytes in
2230 T_FAIR (the protocol share the vn rate) */
2231 m_fair_vn.protocol_credit_delta[protocol] =
2232 (u32)((vn_min_rate / 8) * t_fair *
2233 protocol_min_rate / protocolWeightSum);
2234 }
2235 } while (0);
2236#endif
2237
2238 /* Store it to internal memory */
2239 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2240 REG_WR(bp, BAR_XSTRORM_INTMEM +
2241 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2242 ((u32 *)(&m_rs_vn))[i]);
2243
2244 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2245 REG_WR(bp, BAR_XSTRORM_INTMEM +
2246 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2247 ((u32 *)(&m_fair_vn))[i]);
2248}
2249
c18487ee
YR
2250/* This function is called upon link interrupt */
2251static void bnx2x_link_attn(struct bnx2x *bp)
2252{
34f80b04
EG
2253 int vn;
2254
bb2a0f7a
YG
2255 /* Make sure that we are synced with the current statistics */
2256 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2257
c18487ee 2258 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2259
bb2a0f7a
YG
2260 if (bp->link_vars.link_up) {
2261
2262 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2263 struct host_port_stats *pstats;
2264
2265 pstats = bnx2x_sp(bp, port_stats);
2266 /* reset old bmac stats */
2267 memset(&(pstats->mac_stx[0]), 0,
2268 sizeof(struct mac_stx));
2269 }
2270 if ((bp->state == BNX2X_STATE_OPEN) ||
2271 (bp->state == BNX2X_STATE_DISABLED))
2272 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2273 }
2274
c18487ee
YR
2275 /* indicate link status */
2276 bnx2x_link_report(bp);
34f80b04
EG
2277
2278 if (IS_E1HMF(bp)) {
2279 int func;
2280
2281 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2282 if (vn == BP_E1HVN(bp))
2283 continue;
2284
2285 func = ((vn << 1) | BP_PORT(bp));
2286
2287 /* Set the attention towards other drivers
2288 on the same port */
2289 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2290 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2291 }
2292 }
2293
2294 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2295 struct cmng_struct_per_port m_cmng_port;
2296 u32 wsum;
2297 int port = BP_PORT(bp);
2298
2299 /* Init RATE SHAPING and FAIRNESS contexts */
2300 wsum = bnx2x_calc_vn_wsum(bp);
2301 bnx2x_init_port_minmax(bp, (int)wsum,
2302 bp->link_vars.line_speed,
2303 &m_cmng_port);
2304 if (IS_E1HMF(bp))
2305 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2306 bnx2x_init_vn_minmax(bp, 2*vn + port,
2307 wsum, bp->link_vars.line_speed,
2308 &m_cmng_port);
2309 }
c18487ee 2310}
a2fbb9ea 2311
c18487ee
YR
2312static void bnx2x__link_status_update(struct bnx2x *bp)
2313{
2314 if (bp->state != BNX2X_STATE_OPEN)
2315 return;
a2fbb9ea 2316
c18487ee 2317 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2318
bb2a0f7a
YG
2319 if (bp->link_vars.link_up)
2320 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2321 else
2322 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2323
c18487ee
YR
2324 /* indicate link status */
2325 bnx2x_link_report(bp);
a2fbb9ea 2326}
a2fbb9ea 2327
34f80b04
EG
2328static void bnx2x_pmf_update(struct bnx2x *bp)
2329{
2330 int port = BP_PORT(bp);
2331 u32 val;
2332
2333 bp->port.pmf = 1;
2334 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2335
2336 /* enable nig attention */
2337 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2338 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2339 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2340
2341 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2342}
2343
c18487ee 2344/* end of Link */
a2fbb9ea
ET
2345
2346/* slow path */
2347
2348/*
2349 * General service functions
2350 */
2351
2352/* the slow path queue is odd since completions arrive on the fastpath ring */
2353static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2354 u32 data_hi, u32 data_lo, int common)
2355{
34f80b04 2356 int func = BP_FUNC(bp);
a2fbb9ea 2357
34f80b04
EG
2358 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2359 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2360 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2361 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2362 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2363
2364#ifdef BNX2X_STOP_ON_ERROR
2365 if (unlikely(bp->panic))
2366 return -EIO;
2367#endif
2368
34f80b04 2369 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2370
2371 if (!bp->spq_left) {
2372 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2373 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2374 bnx2x_panic();
2375 return -EBUSY;
2376 }
f1410647 2377
a2fbb9ea
ET
 2378 /* CID needs the port number to be encoded in it */
2379 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2380 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2381 HW_CID(bp, cid)));
2382 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2383 if (common)
2384 bp->spq_prod_bd->hdr.type |=
2385 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2386
2387 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2388 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2389
2390 bp->spq_left--;
2391
2392 if (bp->spq_prod_bd == bp->spq_last_bd) {
2393 bp->spq_prod_bd = bp->spq;
2394 bp->spq_prod_idx = 0;
2395 DP(NETIF_MSG_TIMER, "end of spq\n");
2396
2397 } else {
2398 bp->spq_prod_bd++;
2399 bp->spq_prod_idx++;
2400 }
2401
34f80b04 2402 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2403 bp->spq_prod_idx);
2404
34f80b04 2405 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2406 return 0;
2407}
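
A self-contained sketch of the producer wrap at the bottom of bnx2x_sp_post(): when the producer reaches the last BD it snaps back to the start of the ring instead of running off the end. The ring size here is an illustrative assumption, not the driver's SPQ depth:

#include <stdio.h>

#define RING_SIZE 8	/* illustrative; not the driver's SPQ depth */

int main(void)
{
	int prod_idx = RING_SIZE - 1;	/* producer sits on the last BD */

	/* advance: wrap to the start instead of running off the ring */
	if (prod_idx == RING_SIZE - 1)
		prod_idx = 0;
	else
		prod_idx++;

	printf("prod_idx = %d\n", prod_idx);	/* 0 */
	return 0;
}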
2408
2409/* acquire split MCP access lock register */
4a37fb66 2410static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2411{
a2fbb9ea 2412 u32 i, j, val;
34f80b04 2413 int rc = 0;
a2fbb9ea
ET
2414
2415 might_sleep();
2416 i = 100;
2417 for (j = 0; j < i*10; j++) {
2418 val = (1UL << 31);
2419 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2420 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2421 if (val & (1L << 31))
2422 break;
2423
2424 msleep(5);
2425 }
a2fbb9ea 2426 if (!(val & (1L << 31))) {
19680c48 2427 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2428 rc = -EBUSY;
2429 }
2430
2431 return rc;
2432}
2433
4a37fb66
YG
2434/* release split MCP access lock register */
2435static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2436{
2437 u32 val = 0;
2438
2439 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2440}
2441
2442static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2443{
2444 struct host_def_status_block *def_sb = bp->def_status_blk;
2445 u16 rc = 0;
2446
2447 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2448 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2449 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2450 rc |= 1;
2451 }
2452 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2453 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2454 rc |= 2;
2455 }
2456 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2457 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2458 rc |= 4;
2459 }
2460 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2461 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2462 rc |= 8;
2463 }
2464 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2465 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2466 rc |= 16;
2467 }
2468 return rc;
2469}
2470
2471/*
2472 * slow path service functions
2473 */
2474
2475static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2476{
34f80b04 2477 int port = BP_PORT(bp);
5c862848
EG
2478 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2479 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2480 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2481 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2482 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2483 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2484 u32 aeu_mask;
a2fbb9ea 2485
a2fbb9ea
ET
2486 if (bp->attn_state & asserted)
2487 BNX2X_ERR("IGU ERROR\n");
2488
3fcaf2e5
EG
2489 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2490 aeu_mask = REG_RD(bp, aeu_addr);
2491
a2fbb9ea 2492 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2493 aeu_mask, asserted);
2494 aeu_mask &= ~(asserted & 0xff);
2495 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2496
3fcaf2e5
EG
2497 REG_WR(bp, aeu_addr, aeu_mask);
2498 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2499
3fcaf2e5 2500 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2501 bp->attn_state |= asserted;
3fcaf2e5 2502 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2503
2504 if (asserted & ATTN_HARD_WIRED_MASK) {
2505 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2506
a5e9a7cf
EG
2507 bnx2x_acquire_phy_lock(bp);
2508
877e9aa4
ET
2509 /* save nig interrupt mask */
2510 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2511 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2512
c18487ee 2513 bnx2x_link_attn(bp);
a2fbb9ea
ET
2514
2515 /* handle unicore attn? */
2516 }
2517 if (asserted & ATTN_SW_TIMER_4_FUNC)
2518 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2519
2520 if (asserted & GPIO_2_FUNC)
2521 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2522
2523 if (asserted & GPIO_3_FUNC)
2524 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2525
2526 if (asserted & GPIO_4_FUNC)
2527 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2528
2529 if (port == 0) {
2530 if (asserted & ATTN_GENERAL_ATTN_1) {
2531 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2532 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2533 }
2534 if (asserted & ATTN_GENERAL_ATTN_2) {
2535 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2536 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2537 }
2538 if (asserted & ATTN_GENERAL_ATTN_3) {
2539 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2540 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2541 }
2542 } else {
2543 if (asserted & ATTN_GENERAL_ATTN_4) {
2544 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2545 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2546 }
2547 if (asserted & ATTN_GENERAL_ATTN_5) {
2548 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2549 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2550 }
2551 if (asserted & ATTN_GENERAL_ATTN_6) {
2552 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2553 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2554 }
2555 }
2556
2557 } /* if hardwired */
2558
5c862848
EG
2559 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2560 asserted, hc_addr);
2561 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2562
2563 /* now set back the mask */
a5e9a7cf 2564 if (asserted & ATTN_NIG_FOR_FUNC) {
877e9aa4 2565 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a5e9a7cf
EG
2566 bnx2x_release_phy_lock(bp);
2567 }
a2fbb9ea
ET
2568}
2569
877e9aa4 2570static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2571{
34f80b04 2572 int port = BP_PORT(bp);
877e9aa4
ET
2573 int reg_offset;
2574 u32 val;
2575
34f80b04
EG
2576 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2577 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2578
34f80b04 2579 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2580
2581 val = REG_RD(bp, reg_offset);
2582 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2583 REG_WR(bp, reg_offset, val);
2584
2585 BNX2X_ERR("SPIO5 hw attention\n");
2586
34f80b04 2587 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2588 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2589 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2590 /* Fan failure attention */
2591
17de50b7 2592 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2593 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2594 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2595 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2596 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2597 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2598 /* mark the failure */
c18487ee 2599 bp->link_params.ext_phy_config &=
877e9aa4 2600 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2601 bp->link_params.ext_phy_config |=
877e9aa4
ET
2602 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2603 SHMEM_WR(bp,
2604 dev_info.port_hw_config[port].
2605 external_phy_config,
c18487ee 2606 bp->link_params.ext_phy_config);
877e9aa4
ET
2607 /* log the failure */
2608 printk(KERN_ERR PFX "Fan Failure on Network"
2609 " Controller %s has caused the driver to"
2610 " shutdown the card to prevent permanent"
2611 " damage. Please contact Dell Support for"
2612 " assistance\n", bp->dev->name);
2613 break;
2614
2615 default:
2616 break;
2617 }
2618 }
34f80b04
EG
2619
2620 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2621
2622 val = REG_RD(bp, reg_offset);
2623 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2624 REG_WR(bp, reg_offset, val);
2625
2626 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2627 (attn & HW_INTERRUT_ASSERT_SET_0));
2628 bnx2x_panic();
2629 }
877e9aa4
ET
2630}
2631
2632static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2633{
2634 u32 val;
2635
2636 if (attn & BNX2X_DOORQ_ASSERT) {
2637
2638 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2639 BNX2X_ERR("DB hw attention 0x%x\n", val);
2640 /* DORQ discard attention */
2641 if (val & 0x2)
2642 BNX2X_ERR("FATAL error from DORQ\n");
2643 }
34f80b04
EG
2644
2645 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2646
2647 int port = BP_PORT(bp);
2648 int reg_offset;
2649
2650 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2651 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2652
2653 val = REG_RD(bp, reg_offset);
2654 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2655 REG_WR(bp, reg_offset, val);
2656
2657 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2658 (attn & HW_INTERRUT_ASSERT_SET_1));
2659 bnx2x_panic();
2660 }
877e9aa4
ET
2661}
2662
2663static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2664{
2665 u32 val;
2666
2667 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2668
2669 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2670 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2671 /* CFC error attention */
2672 if (val & 0x2)
2673 BNX2X_ERR("FATAL error from CFC\n");
2674 }
2675
2676 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2677
2678 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2679 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2680 /* RQ_USDMDP_FIFO_OVERFLOW */
2681 if (val & 0x18000)
2682 BNX2X_ERR("FATAL error from PXP\n");
2683 }
34f80b04
EG
2684
2685 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2686
2687 int port = BP_PORT(bp);
2688 int reg_offset;
2689
2690 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2691 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2692
2693 val = REG_RD(bp, reg_offset);
2694 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2695 REG_WR(bp, reg_offset, val);
2696
2697 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2698 (attn & HW_INTERRUT_ASSERT_SET_2));
2699 bnx2x_panic();
2700 }
877e9aa4
ET
2701}
2702
2703static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2704{
34f80b04
EG
2705 u32 val;
2706
877e9aa4
ET
2707 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2708
34f80b04
EG
2709 if (attn & BNX2X_PMF_LINK_ASSERT) {
2710 int func = BP_FUNC(bp);
2711
2712 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2713 bnx2x__link_status_update(bp);
2714 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2715 DRV_STATUS_PMF)
2716 bnx2x_pmf_update(bp);
2717
2718 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2719
2720 BNX2X_ERR("MC assert!\n");
2721 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2722 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2723 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2724 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2725 bnx2x_panic();
2726
2727 } else if (attn & BNX2X_MCP_ASSERT) {
2728
2729 BNX2X_ERR("MCP assert!\n");
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2731 bnx2x_fw_dump(bp);
877e9aa4
ET
2732
2733 } else
2734 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2735 }
2736
2737 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2738 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2739 if (attn & BNX2X_GRC_TIMEOUT) {
2740 val = CHIP_IS_E1H(bp) ?
2741 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2742 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2743 }
2744 if (attn & BNX2X_GRC_RSV) {
2745 val = CHIP_IS_E1H(bp) ?
2746 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2747 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2748 }
877e9aa4 2749 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2750 }
2751}
2752
2753static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2754{
a2fbb9ea
ET
2755 struct attn_route attn;
2756 struct attn_route group_mask;
34f80b04 2757 int port = BP_PORT(bp);
877e9aa4 2758 int index;
a2fbb9ea
ET
2759 u32 reg_addr;
2760 u32 val;
3fcaf2e5 2761 u32 aeu_mask;
a2fbb9ea
ET
2762
2763 /* need to take HW lock because MCP or other port might also
2764 try to handle this event */
4a37fb66 2765 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2766
2767 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2768 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2769 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2770 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2771 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2772 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2773
2774 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2775 if (deasserted & (1 << index)) {
2776 group_mask = bp->attn_group[index];
2777
34f80b04
EG
2778 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2779 index, group_mask.sig[0], group_mask.sig[1],
2780 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2781
877e9aa4
ET
2782 bnx2x_attn_int_deasserted3(bp,
2783 attn.sig[3] & group_mask.sig[3]);
2784 bnx2x_attn_int_deasserted1(bp,
2785 attn.sig[1] & group_mask.sig[1]);
2786 bnx2x_attn_int_deasserted2(bp,
2787 attn.sig[2] & group_mask.sig[2]);
2788 bnx2x_attn_int_deasserted0(bp,
2789 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2790
a2fbb9ea
ET
2791 if ((attn.sig[0] & group_mask.sig[0] &
2792 HW_PRTY_ASSERT_SET_0) ||
2793 (attn.sig[1] & group_mask.sig[1] &
2794 HW_PRTY_ASSERT_SET_1) ||
2795 (attn.sig[2] & group_mask.sig[2] &
2796 HW_PRTY_ASSERT_SET_2))
6378c025 2797 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2798 }
2799 }
2800
4a37fb66 2801 bnx2x_release_alr(bp);
a2fbb9ea 2802
5c862848 2803 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2804
2805 val = ~deasserted;
3fcaf2e5
EG
2806 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2807 val, reg_addr);
5c862848 2808 REG_WR(bp, reg_addr, val);
a2fbb9ea 2809
a2fbb9ea 2810 if (~bp->attn_state & deasserted)
3fcaf2e5 2811 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2812
2813 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2814 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2815
3fcaf2e5
EG
2816 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2817 aeu_mask = REG_RD(bp, reg_addr);
2818
2819 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2820 aeu_mask, deasserted);
2821 aeu_mask |= (deasserted & 0xff);
2822 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2823
3fcaf2e5
EG
2824 REG_WR(bp, reg_addr, aeu_mask);
2825 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2826
2827 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2828 bp->attn_state &= ~deasserted;
2829 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2830}
2831
2832static void bnx2x_attn_int(struct bnx2x *bp)
2833{
2834 /* read local copy of bits */
68d59484
EG
2835 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2836 attn_bits);
2837 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2838 attn_bits_ack);
a2fbb9ea
ET
2839 u32 attn_state = bp->attn_state;
2840
2841 /* look for changed bits */
2842 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2843 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2844
2845 DP(NETIF_MSG_HW,
2846 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2847 attn_bits, attn_ack, asserted, deasserted);
2848
2849 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2850 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2851
2852 /* handle bits that were raised */
2853 if (asserted)
2854 bnx2x_attn_int_asserted(bp, asserted);
2855
2856 if (deasserted)
2857 bnx2x_attn_int_deasserted(bp, deasserted);
2858}
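
A self-contained sketch of the edge detection above: a bit counts as newly asserted when it is raised in hardware but not yet acked or recorded, and as newly deasserted in the symmetric case. The values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t attn_bits  = 0x1;	/* raised in hardware right now */
	uint32_t attn_ack   = 0x4;	/* already acknowledged */
	uint32_t attn_state = 0x4;	/* what the driver last recorded */

	uint32_t asserted   = attn_bits & ~attn_ack & ~attn_state;
	uint32_t deasserted = ~attn_bits & attn_ack & attn_state;

	printf("asserted %x deasserted %x\n", asserted, deasserted);	/* 1 and 4 */
	return 0;
}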
2859
2860static void bnx2x_sp_task(struct work_struct *work)
2861{
1cf167f2 2862 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2863 u16 status;
2864
34f80b04 2865
a2fbb9ea
ET
2866 /* Return here if interrupt is disabled */
2867 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2868 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2869 return;
2870 }
2871
2872 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2873/* if (status == 0) */
2874/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2875
3196a88a 2876 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2877
877e9aa4
ET
2878 /* HW attentions */
2879 if (status & 0x1)
a2fbb9ea 2880 bnx2x_attn_int(bp);
a2fbb9ea 2881
bb2a0f7a
YG
2882 /* CStorm events: query_stats, port delete ramrod */
2883 if (status & 0x2)
2884 bp->stats_pending = 0;
2885
68d59484 2886 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2887 IGU_INT_NOP, 1);
2888 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2889 IGU_INT_NOP, 1);
2890 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2891 IGU_INT_NOP, 1);
2892 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2893 IGU_INT_NOP, 1);
2894 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2895 IGU_INT_ENABLE, 1);
877e9aa4 2896
a2fbb9ea
ET
2897}
2898
2899static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2900{
2901 struct net_device *dev = dev_instance;
2902 struct bnx2x *bp = netdev_priv(dev);
2903
2904 /* Return here if interrupt is disabled */
2905 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2906 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2907 return IRQ_HANDLED;
2908 }
2909
877e9aa4 2910 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2911
2912#ifdef BNX2X_STOP_ON_ERROR
2913 if (unlikely(bp->panic))
2914 return IRQ_HANDLED;
2915#endif
2916
1cf167f2 2917 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2918
2919 return IRQ_HANDLED;
2920}
2921
2922/* end of slow path */
2923
2924/* Statistics */
2925
2926/****************************************************************************
2927* Macros
2928****************************************************************************/
2929
a2fbb9ea
ET
2930/* sum[hi:lo] += add[hi:lo] */
2931#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2932 do { \
2933 s_lo += a_lo; \
f5ba6772 2934 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2935 } while (0)
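
A self-contained demo of the carry handling in ADD_64: adding 1 to the split value 0x00000000:ffffffff must propagate into the high word, which the macro detects by the low word wrapping below the addend (u32 here is a user-space stand-in for the kernel type):

#include <limits.h>
#include <stdio.h>

typedef unsigned int u32;	/* stand-in for the kernel type */

int main(void)
{
	u32 s_hi = 0, s_lo = UINT_MAX;	/* sum = 0x00000000:ffffffff */
	u32 a_hi = 0, a_lo = 1;		/* add 1 */

	s_lo += a_lo;				/* wraps to 0 */
	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);	/* carry detected */

	printf("%08x:%08x\n", s_hi, s_lo);	/* 00000001:00000000 */
	return 0;
}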
2936
2937/* difference = minuend - subtrahend */
2938#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2939 do { \
bb2a0f7a
YG
2940 if (m_lo < s_lo) { \
2941 /* underflow */ \
a2fbb9ea 2942 d_hi = m_hi - s_hi; \
bb2a0f7a 2943 if (d_hi > 0) { \
6378c025 2944 /* we can 'borrow' 1 */ \
a2fbb9ea
ET
2945 d_hi--; \
2946 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2947 } else { \
6378c025 2948 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2949 d_hi = 0; \
2950 d_lo = 0; \
2951 } \
bb2a0f7a
YG
2952 } else { \
2953 /* m_lo >= s_lo */ \
a2fbb9ea 2954 if (m_hi < s_hi) { \
bb2a0f7a
YG
2955 d_hi = 0; \
2956 d_lo = 0; \
2957 } else { \
6378c025 2958 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2959 d_hi = m_hi - s_hi; \
2960 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2961 } \
2962 } \
2963 } while (0)
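
And the companion borrow case for DIFF_64: 0x2:0x0 minus 0x1:0x1 must borrow from the high word, giving 0x0:ffffffff. A self-contained rendering of the same branch structure:

#include <limits.h>
#include <stdio.h>

typedef unsigned int u32;	/* stand-in for the kernel type */

int main(void)
{
	u32 m_hi = 2, m_lo = 0;		/* minuend */
	u32 s_hi = 1, s_lo = 1;		/* subtrahend */
	u32 d_hi, d_lo;

	if (m_lo < s_lo) {		/* low word underflows */
		d_hi = m_hi - s_hi;
		if (d_hi > 0) {		/* borrow 1 from the high word */
			d_hi--;
			d_lo = m_lo + (UINT_MAX - s_lo) + 1;
		} else {		/* m <= s: clamp to zero */
			d_hi = 0;
			d_lo = 0;
		}
	} else {
		d_hi = (m_hi < s_hi) ? 0 : m_hi - s_hi;
		d_lo = (m_hi < s_hi) ? 0 : m_lo - s_lo;
	}
	printf("%08x:%08x\n", d_hi, d_lo);	/* 00000000:ffffffff */
	return 0;
}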
2964
bb2a0f7a 2965#define UPDATE_STAT64(s, t) \
a2fbb9ea 2966 do { \
bb2a0f7a
YG
2967 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2968 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2969 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2970 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2971 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2972 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2973 } while (0)
2974
bb2a0f7a 2975#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2976 do { \
bb2a0f7a
YG
2977 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2978 diff.lo, new->s##_lo, old->s##_lo); \
2979 ADD_64(estats->t##_hi, diff.hi, \
2980 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2981 } while (0)
2982
2983/* sum[hi:lo] += add */
2984#define ADD_EXTEND_64(s_hi, s_lo, a) \
2985 do { \
2986 s_lo += a; \
2987 s_hi += (s_lo < a) ? 1 : 0; \
2988 } while (0)
2989
bb2a0f7a 2990#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2991 do { \
bb2a0f7a
YG
2992 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2993 pstats->mac_stx[1].s##_lo, \
2994 new->s); \
a2fbb9ea
ET
2995 } while (0)
2996
bb2a0f7a 2997#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2998 do { \
2999 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3000 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
3001 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3002 } while (0)
3003
3004#define UPDATE_EXTEND_XSTAT(s, t) \
3005 do { \
3006 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007 old_xclient->s = le32_to_cpu(xclient->s); \
3008 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
3009 } while (0)
3010
3011/*
3012 * General service functions
3013 */
3014
3015static inline long bnx2x_hilo(u32 *hiref)
3016{
3017 u32 lo = *(hiref + 1);
3018#if (BITS_PER_LONG == 64)
3019 u32 hi = *hiref;
3020
3021 return HILO_U64(hi, lo);
3022#else
3023 return lo;
3024#endif
3025}
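
The stats structures lay each 64-bit counter out as adjacent {hi, lo} u32s, and bnx2x_hilo() folds the pair into one value on 64-bit builds (on 32-bit it just returns the low word). A user-space copy of that fold, assuming a 64-bit long:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* user-space copy of the fold, assuming a 64-bit long */
static long hilo(u32 *hiref)
{
	return ((long)hiref[0] << 32) | hiref[1];
}

int main(void)
{
	u32 ctr[2] = { 0x1, 0x2 };	/* {hi, lo} as laid out in the stats structs */

	printf("%#lx\n", hilo(ctr));	/* 0x100000002 */
	return 0;
}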
3026
3027/*
3028 * Init service functions
3029 */
3030
bb2a0f7a
YG
3031static void bnx2x_storm_stats_post(struct bnx2x *bp)
3032{
3033 if (!bp->stats_pending) {
3034 struct eth_query_ramrod_data ramrod_data = {0};
3035 int rc;
3036
3037 ramrod_data.drv_counter = bp->stats_counter++;
3038 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
3039 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3040
3041 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3042 ((u32 *)&ramrod_data)[1],
3043 ((u32 *)&ramrod_data)[0], 0);
3044 if (rc == 0) {
 3045 /* stats ramrod has its own slot on the spq */
3046 bp->spq_left++;
3047 bp->stats_pending = 1;
3048 }
3049 }
3050}
3051
3052static void bnx2x_stats_init(struct bnx2x *bp)
3053{
3054 int port = BP_PORT(bp);
3055
3056 bp->executer_idx = 0;
3057 bp->stats_counter = 0;
3058
3059 /* port stats */
3060 if (!BP_NOMCP(bp))
3061 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3062 else
3063 bp->port.port_stx = 0;
3064 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3065
3066 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3067 bp->port.old_nig_stats.brb_discard =
3068 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3069 bp->port.old_nig_stats.brb_truncate =
3070 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3071 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3072 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3073 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3074 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3075
3076 /* function stats */
3077 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3078 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3079 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3080 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3081
3082 bp->stats_state = STATS_STATE_DISABLED;
3083 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3084 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3085}
3086
3087static void bnx2x_hw_stats_post(struct bnx2x *bp)
3088{
3089 struct dmae_command *dmae = &bp->stats_dmae;
3090 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3091
3092 *stats_comp = DMAE_COMP_VAL;
3093
3094 /* loader */
3095 if (bp->executer_idx) {
3096 int loader_idx = PMF_DMAE_C(bp);
3097
3098 memset(dmae, 0, sizeof(struct dmae_command));
3099
3100 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3101 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3102 DMAE_CMD_DST_RESET |
3103#ifdef __BIG_ENDIAN
3104 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3105#else
3106 DMAE_CMD_ENDIANITY_DW_SWAP |
3107#endif
3108 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3109 DMAE_CMD_PORT_0) |
3110 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3111 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3112 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3113 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3114 sizeof(struct dmae_command) *
3115 (loader_idx + 1)) >> 2;
3116 dmae->dst_addr_hi = 0;
3117 dmae->len = sizeof(struct dmae_command) >> 2;
3118 if (CHIP_IS_E1(bp))
3119 dmae->len--;
3120 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3121 dmae->comp_addr_hi = 0;
3122 dmae->comp_val = 1;
3123
3124 *stats_comp = 0;
3125 bnx2x_post_dmae(bp, dmae, loader_idx);
3126
3127 } else if (bp->func_stx) {
3128 *stats_comp = 0;
3129 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3130 }
3131}
3132
3133static int bnx2x_stats_comp(struct bnx2x *bp)
3134{
3135 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3136 int cnt = 10;
3137
3138 might_sleep();
3139 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3140 if (!cnt) {
 3141 BNX2X_ERR("timeout waiting for stats to finish\n");
3142 break;
3143 }
3144 cnt--;
12469401 3145 msleep(1);
bb2a0f7a
YG
3146 }
3147 return 1;
3148}
3149
3150/*
3151 * Statistics service functions
3152 */
3153
3154static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3155{
3156 struct dmae_command *dmae;
3157 u32 opcode;
3158 int loader_idx = PMF_DMAE_C(bp);
3159 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3160
3161 /* sanity */
3162 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3163 BNX2X_ERR("BUG!\n");
3164 return;
3165 }
3166
3167 bp->executer_idx = 0;
3168
3169 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3170 DMAE_CMD_C_ENABLE |
3171 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3172#ifdef __BIG_ENDIAN
3173 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3174#else
3175 DMAE_CMD_ENDIANITY_DW_SWAP |
3176#endif
3177 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3178 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3179
3180 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3181 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3182 dmae->src_addr_lo = bp->port.port_stx >> 2;
3183 dmae->src_addr_hi = 0;
3184 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3185 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3186 dmae->len = DMAE_LEN32_RD_MAX;
3187 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3188 dmae->comp_addr_hi = 0;
3189 dmae->comp_val = 1;
3190
3191 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3192 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3193 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3194 dmae->src_addr_hi = 0;
7a9b2557
VZ
3195 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3196 DMAE_LEN32_RD_MAX * 4);
3197 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3198 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3199 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3200 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3201 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3202 dmae->comp_val = DMAE_COMP_VAL;
3203
3204 *stats_comp = 0;
3205 bnx2x_hw_stats_post(bp);
3206 bnx2x_stats_comp(bp);
3207}
3208
3209static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3210{
3211 struct dmae_command *dmae;
34f80b04 3212 int port = BP_PORT(bp);
bb2a0f7a 3213 int vn = BP_E1HVN(bp);
a2fbb9ea 3214 u32 opcode;
bb2a0f7a 3215 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3216 u32 mac_addr;
bb2a0f7a
YG
3217 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3218
3219 /* sanity */
3220 if (!bp->link_vars.link_up || !bp->port.pmf) {
3221 BNX2X_ERR("BUG!\n");
3222 return;
3223 }
a2fbb9ea
ET
3224
3225 bp->executer_idx = 0;
bb2a0f7a
YG
3226
3227 /* MCP */
3228 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3229 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3230 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3231#ifdef __BIG_ENDIAN
bb2a0f7a 3232 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3233#else
bb2a0f7a 3234 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3235#endif
bb2a0f7a
YG
3236 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3237 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3238
bb2a0f7a 3239 if (bp->port.port_stx) {
a2fbb9ea
ET
3240
3241 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3242 dmae->opcode = opcode;
bb2a0f7a
YG
3243 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3244 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3245 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3246 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3247 dmae->len = sizeof(struct host_port_stats) >> 2;
3248 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3249 dmae->comp_addr_hi = 0;
3250 dmae->comp_val = 1;
a2fbb9ea
ET
3251 }
3252
bb2a0f7a
YG
3253 if (bp->func_stx) {
3254
3255 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3256 dmae->opcode = opcode;
3257 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3258 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3259 dmae->dst_addr_lo = bp->func_stx >> 2;
3260 dmae->dst_addr_hi = 0;
3261 dmae->len = sizeof(struct host_func_stats) >> 2;
3262 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3263 dmae->comp_addr_hi = 0;
3264 dmae->comp_val = 1;
a2fbb9ea
ET
3265 }
3266
bb2a0f7a 3267 /* MAC */
a2fbb9ea
ET
3268 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3269 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3270 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3271#ifdef __BIG_ENDIAN
3272 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3273#else
3274 DMAE_CMD_ENDIANITY_DW_SWAP |
3275#endif
bb2a0f7a
YG
3276 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3277 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3278
c18487ee 3279 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3280
3281 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3282 NIG_REG_INGRESS_BMAC0_MEM);
3283
3284 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3285 BIGMAC_REGISTER_TX_STAT_GTBYT */
3286 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3287 dmae->opcode = opcode;
3288 dmae->src_addr_lo = (mac_addr +
3289 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3290 dmae->src_addr_hi = 0;
3291 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3292 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3293 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3294 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3295 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296 dmae->comp_addr_hi = 0;
3297 dmae->comp_val = 1;
3298
3299 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3300 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3301 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3302 dmae->opcode = opcode;
3303 dmae->src_addr_lo = (mac_addr +
3304 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3305 dmae->src_addr_hi = 0;
3306 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3307 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3308 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3309 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3310 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3311 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3312 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3313 dmae->comp_addr_hi = 0;
3314 dmae->comp_val = 1;
3315
c18487ee 3316 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3317
3318 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3319
3320 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = (mac_addr +
3324 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3325 dmae->src_addr_hi = 0;
3326 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3327 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3328 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3330 dmae->comp_addr_hi = 0;
3331 dmae->comp_val = 1;
3332
3333 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3334 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335 dmae->opcode = opcode;
3336 dmae->src_addr_lo = (mac_addr +
3337 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3338 dmae->src_addr_hi = 0;
3339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3340 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3341 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3342 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3343 dmae->len = 1;
3344 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3345 dmae->comp_addr_hi = 0;
3346 dmae->comp_val = 1;
3347
3348 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3349 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3350 dmae->opcode = opcode;
3351 dmae->src_addr_lo = (mac_addr +
3352 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3353 dmae->src_addr_hi = 0;
3354 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3355 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3356 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3357 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3358 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
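
/* Illustrative sketch, not driver code: each statistics transfer above is
 * one struct dmae_command describing a HW copy - source, destination,
 * length in dwords, and a completion write.  Chained commands complete
 * into the next loader register (dmae_reg_go_c[]) so the hardware kicks
 * off the following command by itself; only the final command writes
 * DMAE_COMP_VAL into the stats_comp word that the driver later polls:
 *
 *	dmae->src_addr_lo = <GRC address or U64_LO(host address)>;
 *	dmae->dst_addr_lo = <the other side of the copy>;
 *	dmae->len	  = <byte count> >> 2;
 *	dmae->comp_val	  = DMAE_COMP_VAL;	(last command only)
 */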

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}
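
/* Illustrative sketch, not the real macro body: the UPDATE_STAT64-style
 * helpers extend a wrapping hardware counter into a 64-bit hi/lo
 * software accumulator.  Conceptually, per statistic:
 *
 *	diff = new->counter - old->counter;	(delta since last DMA)
 *	old->counter = new->counter;		(remember the snapshot)
 *	ADD_64(pstats->acc_hi, diff.hi, pstats->acc_lo, diff.lo);
 *
 * so narrow MAC counters can wrap without corrupting the totals.
 */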

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   "  tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   "  xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
				total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
				total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
				total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}
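
/* Worked example of the validity checks above: the driver advances
 * bp->stats_counter when it posts a statistics query, and each storm
 * stamps the counter of the query it last served into its client area.
 * With bp->stats_counter == 7, the snapshot is accepted only while the
 * storm counters read 6, i.e. "storm counter + 1 == expected" means the
 * firmware has finished exactly the query this update waits for;
 * anything else and the stale snapshot is dropped.
 */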

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}
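
/* Illustrative sketch, not driver code: bnx2x_hilo() folds one of the
 * _hi/_lo pairs kept in bnx2x_eth_stats back into a single 64-bit value
 * for net_device_stats, roughly:
 *
 *	static inline u64 hilo(u32 *hiref)
 *	{
 *		return ((u64)hiref[0] << 32) + hiref[1];
 *	}
 *
 * assuming each _lo word immediately follows its _hi word in the struct
 * (which is why only the _hi member is passed in above).
 */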

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats not updated for 3 times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %u  no_buff_discard %u  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
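
/* Example of the state machine above in action: with statistics in
 * STATS_STATE_ENABLED, bnx2x_stats_handle(bp, STATS_EVENT_STOP) runs
 * bnx2x_stats_stop() and leaves bp->stats_state == STATS_STATE_DISABLED;
 * a later STATS_EVENT_LINK_UP runs bnx2x_stats_start() and re-enters
 * STATS_STATE_ENABLED.  Periodic STATS_EVENT_UPDATE events map to
 * bnx2x_stats_do_nothing() while disabled, so no DMAE or ramrod traffic
 * is generated without link.
 */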

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
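
/* Worked example of the heartbeat check above: after the driver writes
 * drv_pulse = 0x10 into the shared-memory mailbox, the management CPU is
 * considered alive while mcp_pulse reads 0x10 (already answered) or 0x0f
 * (one behind, answer still pending) - wrap through the sequence mask is
 * handled by the "+ 1" arithmetic.  Any other value means a missed beat;
 * only an error is logged and the timer keeps running.
 */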

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
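
/* Illustrative sketch, not driver code: a status block is one DMA-able
 * structure split into per-storm sections (u/c for the fastpath blocks,
 * attn/u/c/t/x for the default block).  Initialization follows the same
 * pattern for every section - tell the storm where its section lives,
 * record the owning function, and park every HC index in the disabled
 * state until bnx2x_update_coalesce() configures the ones in use:
 *
 *	REG_WR(bp, <SB_ADDR offset>,	 U64_LO(section));
 *	REG_WR(bp, <SB_ADDR offset> + 4, U64_HI(section));
 *	REG_WR8(bp, <FUNC offset>, func);
 *	for each index: REG_WR16(bp, <HC_DISABLE offset>, 1);
 */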

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}
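
/* Worked example of the coalescing setup above (the /12 suggests the HC
 * timeout byte counts 12-usec hardware ticks - an inference from this
 * code, not from documentation): with bp->rx_ticks == 48 the USTORM
 * timeout for U_SB_ETH_RX_CQ_INDEX is written as 4, coalescing rx
 * completions for roughly 48 usec; rx_ticks == 0 writes the HC_DISABLE
 * word as 1 instead, the same state bnx2x_init_sb() leaves all indices
 * in.
 */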

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
			   BCM_RX_ETH_PAYLOAD_ALIGN;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_size %d  effective_mtu %d\n",
		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
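
/* Note on the ring layout above (derived from the indexing): each ring
 * occupies several BCM_PAGE_SIZE pages, and the last slots of every page
 * (RX_DESC_CNT * i - 2 for the BD/SGE rings, RCQ_DESC_CNT * i - 1 for
 * the CQE ring) are not real descriptors but "next page" pointers that
 * chain page i to page (i % NUM_..._RINGS).  The modulo makes the final
 * page wrap back to the first, and the NEXT_*_IDX() macros skip over the
 * pointer slots when producers and consumers advance.
 */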

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size =
						BCM_RX_ETH_PAYLOAD_ALIGN;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_queues));
}
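
/* Worked example of the RSS indirection fill above: with num_queues == 4
 * and BP_CL_ID(bp) == 0 the table is written as the repeating pattern
 * 0, 1, 2, 3, 0, 1, ... so hash results spread evenly over the client
 * (queue) ids; with a single queue is_multi() is false and the function
 * returns without touching the table.
 */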

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
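
/* Worked example of the max_sges_for_packet arithmetic above, assuming
 * 4KB SGE pages (SGE_PAGE_SHIFT == 12) and PAGES_PER_SGE == 2 - both
 * values are assumptions for illustration only: an MTU of 9000
 * page-aligns to 12288 bytes, i.e. 3 pages; rounding 3 up to the next
 * multiple of PAGES_PER_SGE gives 4, and shifting right by
 * PAGES_PER_SGE_SHIFT (1) yields 2 SGEs per aggregated packet.
 */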

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
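
/* Example of the per-function filtering above: each accept/drop field is
 * a bitmask of logical function ids, so mask = (1 << BP_L_ID(bp)) flips
 * only this function's bit and leaves other functions sharing the port
 * untouched.  E.g. BNX2X_RX_MODE_PROMISC with BP_L_ID(bp) == 2 sets bit
 * 2 in all three accept_all masks, while BNX2X_RX_MODE_NORMAL accepts
 * only broadcast explicitly and relies on exact MAC matching for
 * unicast.
 */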

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
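
/* Example of the deliberate switch fall-through above: the MCP answers
 * the load request with the widest scope this function is responsible
 * for, and each scope implies the narrower ones.  LOAD_COMMON (first
 * driver on the chip) therefore runs common + port + function init,
 * LOAD_PORT (first on its port) runs port + function init, and
 * LOAD_FUNCTION runs the per-function init alone.
 */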

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
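
/* Notes on the header handling above (standard gzip framing, RFC 1952):
 * a gzip stream begins with the magic bytes 0x1f 0x8b, a method byte
 * (8 == Z_DEFLATED) and a flag byte.  The fixed header is 10 bytes, so
 * n starts at 10; if the FNAME flag (0x8) is set, a NUL-terminated file
 * name follows and is skipped.  Passing -MAX_WBITS to
 * zlib_inflateInit2() then tells zlib to expect a raw deflate stream,
 * since the gzip wrapper has already been consumed by hand.
 */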

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
4958
4959/* some of the internal memories
4960 * are not directly readable from the driver
4961 * to test them we send debug packets
4962 */
4963static int bnx2x_int_mem_test(struct bnx2x *bp)
4964{
4965 int factor;
4966 int count, i;
4967 u32 val = 0;
4968
ad8d3948 4969 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4970 factor = 120;
ad8d3948
EG
4971 else if (CHIP_REV_IS_EMUL(bp))
4972 factor = 200;
4973 else
a2fbb9ea 4974 factor = 1;
a2fbb9ea
ET
4975
4976 DP(NETIF_MSG_HW, "start part1\n");
4977
4978 /* Disable inputs of parser neighbor blocks */
4979 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4980 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4981 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4982 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4983
4984 /* Write 0 to parser credits for CFC search request */
4985 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4986
4987 /* send Ethernet packet */
4988 bnx2x_lb_pckt(bp);
4989
4990 /* TODO do i reset NIG statistic? */
4991 /* Wait until NIG register shows 1 packet of size 0x10 */
4992 count = 1000 * factor;
4993 while (count) {
34f80b04 4994
a2fbb9ea
ET
4995 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4996 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4997 if (val == 0x10)
4998 break;
4999
5000 msleep(10);
5001 count--;
5002 }
5003 if (val != 0x10) {
5004 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5005 return -1;
5006 }
5007
5008 /* Wait until PRS register shows 1 packet */
5009 count = 1000 * factor;
5010 while (count) {
5011 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5012 if (val == 1)
5013 break;
5014
5015 msleep(10);
5016 count--;
5017 }
5018 if (val != 0x1) {
5019 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5020 return -2;
5021 }
5022
5023 /* Reset and init BRB, PRS */
34f80b04 5024 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5025 msleep(50);
34f80b04 5026 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5027 msleep(50);
5028 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5029 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5030
5031 DP(NETIF_MSG_HW, "part2\n");
5032
5033 /* Disable inputs of parser neighbor blocks */
5034 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5035 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5036 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5037 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5038
5039 /* Write 0 to parser credits for CFC search request */
5040 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5041
5042 /* send 10 Ethernet packets */
5043 for (i = 0; i < 10; i++)
5044 bnx2x_lb_pckt(bp);
5045
5046 /* Wait until NIG register shows 10 + 1
5047 packets of size 11*0x10 = 0xb0 */
5048 count = 1000 * factor;
5049 while (count) {
34f80b04 5050
a2fbb9ea
ET
5051 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5052 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5053 if (val == 0xb0)
5054 break;
5055
5056 msleep(10);
5057 count--;
5058 }
5059 if (val != 0xb0) {
5060 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5061 return -3;
5062 }
5063
5064 /* Wait until PRS register shows 2 packets */
5065 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5066 if (val != 2)
5067 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5068
5069 /* Write 1 to parser credits for CFC search request */
5070 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5071
5072 /* Wait until PRS register shows 3 packets */
5073 msleep(10 * factor);
5074 /* Wait until NIG register shows 1 packet of size 0x10 */
5075 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5076 if (val != 3)
5077 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5078
5079 /* clear NIG EOP FIFO */
5080 for (i = 0; i < 11; i++)
5081 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
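 /* 11 entries: the 10 packets sent above plus the one from part1,
    since the NIG was not reset between the two parts */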
5082 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5083 if (val != 1) {
5084 BNX2X_ERR("clear of NIG failed\n");
5085 return -4;
5086 }
5087
5088 /* Reset and init BRB, PRS, NIG */
5089 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5090 msleep(50);
5091 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5092 msleep(50);
5093 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5094 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5095#ifndef BCM_ISCSI
5096 /* set NIC mode */
5097 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5098#endif
5099
5100 /* Enable inputs of parser neighbor blocks */
5101 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5102 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5103 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5104 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5105
5106 DP(NETIF_MSG_HW, "done\n");
5107
5108 return 0; /* OK */
5109}
5110
5111static void enable_blocks_attention(struct bnx2x *bp)
5112{
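 /* writing 0 to an *_INT_MASK register unmasks (enables) every
    attention bit of that block; the commented-out SEM/MISC masks
    are left untouched */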
5113 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5114 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5115 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5116 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5117 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5118 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5119 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5120 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5121 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5122/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5123/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5124 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5125 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5126 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5127/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5128/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5129 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5130 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5131 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5132 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5133/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5134/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5135 if (CHIP_REV_IS_FPGA(bp))
5136 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5137 else
5138 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5139 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5140 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5141 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5142/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5143/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5144 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5145 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5146/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5147 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5148}
5149
34f80b04 5150
81f75bbf
EG
5151static void bnx2x_reset_common(struct bnx2x *bp)
5152{
5153 /* reset_common */
5154 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5155 0xd3ffff7f);
5156 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5157}
5158
34f80b04 5159static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5160{
a2fbb9ea 5161 u32 val, i;
a2fbb9ea 5162
34f80b04 5163 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5164
81f75bbf 5165 bnx2x_reset_common(bp);
34f80b04
EG
5166 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5167 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5168
34f80b04
EG
5169 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5170 if (CHIP_IS_E1H(bp))
5171 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5172
34f80b04
EG
5173 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5174 msleep(30);
5175 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5176
34f80b04
EG
5177 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5178 if (CHIP_IS_E1(bp)) {
5179 /* enable HW interrupt from PXP on USDM overflow
5180 bit 16 on INT_MASK_0 */
5181 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5182 }
a2fbb9ea 5183
34f80b04
EG
5184 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5185 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5186
5187#ifdef __BIG_ENDIAN
34f80b04
EG
5188 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5189 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5190 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5191 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5192 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
34f80b04
EG
5193
5194/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5195 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5196 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5197 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5198 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5199#endif
5200
34f80b04 5201 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5202#ifdef BCM_ISCSI
34f80b04
EG
5203 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5204 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5205 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5206#endif
5207
34f80b04
EG
5208 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5209 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5210
34f80b04
EG
5211 /* let the HW do its magic ... */
5212 msleep(100);
5213 /* finish PXP init */
5214 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5215 if (val != 1) {
5216 BNX2X_ERR("PXP2 CFG failed\n");
5217 return -EBUSY;
5218 }
5219 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5220 if (val != 1) {
5221 BNX2X_ERR("PXP2 RD_INIT failed\n");
5222 return -EBUSY;
5223 }
a2fbb9ea 5224
34f80b04
EG
5225 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5226 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5227
34f80b04 5228 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5229
34f80b04
EG
5230 /* clean the DMAE memory */
5231 bp->dmae_ready = 1;
5232 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5233
34f80b04
EG
5234 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5235 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5236 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5237 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5238
34f80b04
EG
5239 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5240 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5241 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5242 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5243
5244 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5245 /* soft reset pulse */
5246 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5247 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5248
5249#ifdef BCM_ISCSI
34f80b04 5250 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5251#endif
a2fbb9ea 5252
34f80b04
EG
5253 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5254 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5255 if (!CHIP_REV_IS_SLOW(bp)) {
5256 /* enable hw interrupt from doorbell Q */
5257 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5258 }
a2fbb9ea 5259
34f80b04
EG
5260 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5261 if (CHIP_REV_IS_SLOW(bp)) {
5262 /* fix for emulation and FPGA: no pause */
5263 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5264 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5265 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5266 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5267 }
a2fbb9ea 5268
34f80b04 5269 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5270 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5271 /* set NIC mode */
5272 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5273 if (CHIP_IS_E1H(bp))
5274 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5275
34f80b04
EG
5276 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5277 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5278 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5279 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5280
34f80b04
EG
5281 if (CHIP_IS_E1H(bp)) {
5282 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5283 STORM_INTMEM_SIZE_E1H/2);
5284 bnx2x_init_fill(bp,
5285 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5286 0, STORM_INTMEM_SIZE_E1H/2);
5287 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5288 STORM_INTMEM_SIZE_E1H/2);
5289 bnx2x_init_fill(bp,
5290 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5291 0, STORM_INTMEM_SIZE_E1H/2);
5292 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5293 STORM_INTMEM_SIZE_E1H/2);
5294 bnx2x_init_fill(bp,
5295 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5296 0, STORM_INTMEM_SIZE_E1H/2);
5297 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5298 STORM_INTMEM_SIZE_E1H/2);
5299 bnx2x_init_fill(bp,
5300 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5301 0, STORM_INTMEM_SIZE_E1H/2);
5302 } else { /* E1 */
ad8d3948
EG
5303 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5304 STORM_INTMEM_SIZE_E1);
5305 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5306 STORM_INTMEM_SIZE_E1);
5307 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5308 STORM_INTMEM_SIZE_E1);
5309 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5310 STORM_INTMEM_SIZE_E1);
34f80b04 5311 }
a2fbb9ea 5312
34f80b04
EG
5313 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5314 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5315 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5316 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5317
34f80b04
EG
5318 /* sync semi rtc */
5319 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5320 0x80000000);
5321 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5322 0x80000000);
a2fbb9ea 5323
34f80b04
EG
5324 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5325 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5326 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5327
34f80b04
EG
5328 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5329 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5330 REG_WR(bp, i, 0xc0cac01a);
5331 /* TODO: replace with something meaningful */
5332 }
5333 if (CHIP_IS_E1H(bp))
5334 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5335 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5336
34f80b04
EG
5337 if (sizeof(union cdu_context) != 1024)
5338 /* we currently assume that a context is 1024 bytes */
5339 printk(KERN_ALERT PFX "please adjust the size of"
5340 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5341
34f80b04
EG
5342 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5343 val = (4 << 24) + (0 << 12) + 1024;
5344 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5345 if (CHIP_IS_E1(bp)) {
5346 /* !!! fix pxp client credit until excel update */
5347 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5348 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5349 }
a2fbb9ea 5350
34f80b04
EG
5351 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5352 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5353
34f80b04
EG
5354 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5355 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5356
34f80b04
EG
5357 /* PXPCS COMMON comes here */
5358 /* Reset PCIE errors for debug */
5359 REG_WR(bp, 0x2814, 0xffffffff);
5360 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5361
34f80b04
EG
5362 /* EMAC0 COMMON comes here */
5363 /* EMAC1 COMMON comes here */
5364 /* DBU COMMON comes here */
5365 /* DBG COMMON comes here */
5366
5367 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5368 if (CHIP_IS_E1H(bp)) {
5369 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5370 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5371 }
5372
5373 if (CHIP_REV_IS_SLOW(bp))
5374 msleep(200);
5375
5376 /* finish CFC init */
5377 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5378 if (val != 1) {
5379 BNX2X_ERR("CFC LL_INIT failed\n");
5380 return -EBUSY;
5381 }
5382 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5383 if (val != 1) {
5384 BNX2X_ERR("CFC AC_INIT failed\n");
5385 return -EBUSY;
5386 }
5387 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5388 if (val != 1) {
5389 BNX2X_ERR("CFC CAM_INIT failed\n");
5390 return -EBUSY;
5391 }
5392 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5393
34f80b04
EG
5394 /* read NIG statistic
5395 to see if this is our first up since powerup */
5396 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5397 val = *bnx2x_sp(bp, wb_data[0]);
5398
5399 /* do internal memory self test */
5400 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5401 BNX2X_ERR("internal mem self test failed\n");
5402 return -EBUSY;
5403 }
5404
5405 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5406 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
34f80b04
EG
5407 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5408 /* Fan failure is indicated by SPIO 5 */
5409 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5410 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5411
5412 /* set to active low mode */
5413 val = REG_RD(bp, MISC_REG_SPIO_INT);
5414 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5415 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5416 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5417
34f80b04
EG
5418 /* enable interrupt to signal the IGU */
5419 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5420 val |= (1 << MISC_REGISTERS_SPIO_5);
5421 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5422 break;
f1410647 5423
34f80b04
EG
5424 default:
5425 break;
5426 }
f1410647 5427
34f80b04
EG
5428 /* clear PXP2 attentions */
5429 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5430
34f80b04 5431 enable_blocks_attention(bp);
a2fbb9ea 5432
6bbca910
YR
5433 if (!BP_NOMCP(bp)) {
5434 bnx2x_acquire_phy_lock(bp);
5435 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5436 bnx2x_release_phy_lock(bp);
5437 } else
5438 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5439
34f80b04
EG
5440 return 0;
5441}
a2fbb9ea 5442
34f80b04
EG
5443static int bnx2x_init_port(struct bnx2x *bp)
5444{
5445 int port = BP_PORT(bp);
5446 u32 val;
a2fbb9ea 5447
34f80b04
EG
5448 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5449
5450 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5451
5452 /* Port PXP comes here */
5453 /* Port PXP2 comes here */
a2fbb9ea
ET
5454#ifdef BCM_ISCSI
5455 /* ILT line: Port0 1
5456 * Port1 385 */
5457 i++;
5458 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5459 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5460 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5461 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5462
5463 /* Port0 2
5464 * Port1 386 */
5465 i++;
5466 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5467 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5468 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5469 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5470
5471 /* Port0 3
5472 * Port1 387 */
5473 i++;
5474 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5475 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5476 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5477 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5478#endif
34f80b04 5479 /* Port CMs come here */
a2fbb9ea
ET
5480
5481 /* Port QM comes here */
a2fbb9ea
ET
5482#ifdef BCM_ISCSI
5483 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5484 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5485
5486 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5487 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5488#endif
5489 /* Port DQ comes here */
5490 /* Port BRB1 comes here */
ad8d3948 5491 /* Port PRS comes here */
a2fbb9ea
ET
5492 /* Port TSDM comes here */
5493 /* Port CSDM comes here */
5494 /* Port USDM comes here */
5495 /* Port XSDM comes here */
34f80b04
EG
5496 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5497 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5498 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5499 port ? USEM_PORT1_END : USEM_PORT0_END);
5500 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5501 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5502 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5503 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5504 /* Port UPB comes here */
34f80b04
EG
5505 /* Port XPB comes here */
5506
5507 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5508 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5509
5510 /* configure PBF to work without PAUSE, MTU 9000 */
34f80b04 5511 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5512
5513 /* update threshold */
34f80b04 5514 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5515 /* update init credit */
34f80b04 5516 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5517
5518 /* probe changes */
34f80b04 5519 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5520 msleep(5);
34f80b04 5521 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5522
5523#ifdef BCM_ISCSI
5524 /* tell the searcher where the T2 table is */
5525 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5526
5527 wb_write[0] = U64_LO(bp->t2_mapping);
5528 wb_write[1] = U64_HI(bp->t2_mapping);
5529 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5530 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5531 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5532 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5533
5534 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5535 /* Port SRCH comes here */
5536#endif
5537 /* Port CDU comes here */
5538 /* Port CFC comes here */
34f80b04
EG
5539
5540 if (CHIP_IS_E1(bp)) {
5541 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5542 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5543 }
5544 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5545 port ? HC_PORT1_END : HC_PORT0_END);
5546
5547 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5548 MISC_AEU_PORT0_START,
34f80b04
EG
5549 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5550 /* init aeu_mask_attn_func_0/1:
5551 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5552 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5553 * bits 4-7 are used for "per vn group attention" */
5554 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5555 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5556
a2fbb9ea
ET
5557 /* Port PXPCS comes here */
5558 /* Port EMAC0 comes here */
5559 /* Port EMAC1 comes here */
5560 /* Port DBU comes here */
5561 /* Port DBG comes here */
34f80b04
EG
5562 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5563 port ? NIG_PORT1_END : NIG_PORT0_END);
5564
5565 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5566
5567 if (CHIP_IS_E1H(bp)) {
5568 u32 wsum;
5569 struct cmng_struct_per_port m_cmng_port;
5570 int vn;
5571
5572 /* 0x2 disable e1hov, 0x1 enable */
5573 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5574 (IS_E1HMF(bp) ? 0x1 : 0x2));
5575
5576 /* Init RATE SHAPING and FAIRNESS contexts.
5577 Initialize as if there is 10G link. */
5578 wsum = bnx2x_calc_vn_wsum(bp);
5579 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5580 if (IS_E1HMF(bp))
5581 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5582 bnx2x_init_vn_minmax(bp, 2*vn + port,
5583 wsum, 10000, &m_cmng_port);
5584 }
5585
a2fbb9ea
ET
5586 /* Port MCP comes here */
5587 /* Port DMAE comes here */
5588
34f80b04 5589 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5590 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
f1410647
ET
5591 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5592 /* add SPIO 5 to group 0 */
5593 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5594 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5595 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5596 break;
5597
5598 default:
5599 break;
5600 }
5601
c18487ee 5602 bnx2x__link_reset(bp);
a2fbb9ea 5603
34f80b04
EG
5604 return 0;
5605}
5606
5607#define ILT_PER_FUNC (768/2)
5608#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5609/* the phys address is shifted right 12 bits and a
5610 1=valid bit is added at the 53rd bit;
5611 since this is a wide register(TM)
5612 we split it into two 32-bit writes
5613 */
5614#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5615#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5616#define PXP_ONE_ILT(x) (((x) << 10) | x)
5617#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
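/* an ILT range register encodes (last << 10) | first;
   PXP_ONE_ILT() is the degenerate one-line range */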
5618
5619#define CNIC_ILT_LINES 0
5620
5621static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5622{
5623 int reg;
5624
5625 if (CHIP_IS_E1H(bp))
5626 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5627 else /* E1 */
5628 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5629
5630 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5631}
5632
5633static int bnx2x_init_func(struct bnx2x *bp)
5634{
5635 int port = BP_PORT(bp);
5636 int func = BP_FUNC(bp);
5637 int i;
5638
5639 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5640
5641 i = FUNC_ILT_BASE(func);
5642
5643 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5644 if (CHIP_IS_E1H(bp)) {
5645 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5646 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5647 } else /* E1 */
5648 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5649 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5650
5651
5652 if (CHIP_IS_E1H(bp)) {
5653 for (i = 0; i < 9; i++)
5654 bnx2x_init_block(bp,
5655 cm_start[func][i], cm_end[func][i]);
5656
5657 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5658 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5659 }
5660
5661 /* HC init per function */
5662 if (CHIP_IS_E1H(bp)) {
5663 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5664
5665 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5666 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5667 }
5668 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5669
5670 if (CHIP_IS_E1H(bp))
5671 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5672
c14423fe 5673 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5674 REG_WR(bp, 0x2114, 0xffffffff);
5675 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5676
34f80b04
EG
5677 return 0;
5678}
5679
5680static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5681{
5682 int i, rc = 0;
a2fbb9ea 5683
34f80b04
EG
5684 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5685 BP_FUNC(bp), load_code);
a2fbb9ea 5686
34f80b04
EG
5687 bp->dmae_ready = 0;
5688 mutex_init(&bp->dmae_mutex);
5689 bnx2x_gunzip_init(bp);
a2fbb9ea 5690
34f80b04
EG
5691 switch (load_code) {
5692 case FW_MSG_CODE_DRV_LOAD_COMMON:
5693 rc = bnx2x_init_common(bp);
5694 if (rc)
5695 goto init_hw_err;
5696 /* no break */
5697
5698 case FW_MSG_CODE_DRV_LOAD_PORT:
5699 bp->dmae_ready = 1;
5700 rc = bnx2x_init_port(bp);
5701 if (rc)
5702 goto init_hw_err;
5703 /* no break */
5704
5705 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5706 bp->dmae_ready = 1;
5707 rc = bnx2x_init_func(bp);
5708 if (rc)
5709 goto init_hw_err;
5710 break;
5711
5712 default:
5713 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5714 break;
5715 }
5716
5717 if (!BP_NOMCP(bp)) {
5718 int func = BP_FUNC(bp);
a2fbb9ea
ET
5719
5720 bp->fw_drv_pulse_wr_seq =
34f80b04 5721 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5722 DRV_PULSE_SEQ_MASK);
34f80b04
EG
5723 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5724 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5725 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5726 } else
5727 bp->func_stx = 0;
a2fbb9ea 5728
34f80b04
EG
5729 /* this needs to be done before gunzip end */
5730 bnx2x_zero_def_sb(bp);
5731 for_each_queue(bp, i)
5732 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5733
5734init_hw_err:
5735 bnx2x_gunzip_end(bp);
5736
5737 return rc;
a2fbb9ea
ET
5738}
5739
c14423fe 5740/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5741static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5742{
34f80b04 5743 int func = BP_FUNC(bp);
f1410647
ET
5744 u32 seq = ++bp->fw_seq;
5745 u32 rc = 0;
19680c48
EG
5746 u32 cnt = 1;
5747 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
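 /* poll every 10ms on real silicon, every 100ms on emulation/FPGA */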
a2fbb9ea 5748
34f80b04 5749 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5750 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5751
19680c48
EG
5752 do {
5753 /* let the FW do its magic ... */
5754 msleep(delay);
a2fbb9ea 5755
19680c48 5756 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5757
19680c48
EG
5758 /* Give the FW up to 2 seconds (200 * 10ms), longer on emulation */
5759 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5760
5761 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5762 cnt*delay, rc, seq);
a2fbb9ea
ET
5763
5764 /* is this a reply to our command? */
5765 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5766 rc &= FW_MSG_CODE_MASK;
f1410647 5767
a2fbb9ea
ET
5768 } else {
5769 /* FW BUG! */
5770 BNX2X_ERR("FW failed to respond!\n");
5771 bnx2x_fw_dump(bp);
5772 rc = 0;
5773 }
f1410647 5774
a2fbb9ea
ET
5775 return rc;
5776}
5777
5778static void bnx2x_free_mem(struct bnx2x *bp)
5779{
5780
5781#define BNX2X_PCI_FREE(x, y, size) \
5782 do { \
5783 if (x) { \
5784 pci_free_consistent(bp->pdev, size, x, y); \
5785 x = NULL; \
5786 y = 0; \
5787 } \
5788 } while (0)
5789
5790#define BNX2X_FREE(x) \
5791 do { \
5792 if (x) { \
5793 vfree(x); \
5794 x = NULL; \
5795 } \
5796 } while (0)
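/* the do { } while (0) wrappers keep these multi-statement macros
   safe as a single statement, e.g. in an unbraced if/else */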
5797
5798 int i;
5799
5800 /* fastpath */
5801 for_each_queue(bp, i) {
5802
5803 /* Status blocks */
5804 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5805 bnx2x_fp(bp, i, status_blk_mapping),
5806 sizeof(struct host_status_block) +
5807 sizeof(struct eth_tx_db_data));
5808
5809 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5810 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5811 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5812 bnx2x_fp(bp, i, tx_desc_mapping),
5813 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5814
5815 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5816 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5817 bnx2x_fp(bp, i, rx_desc_mapping),
5818 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5819
5820 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5821 bnx2x_fp(bp, i, rx_comp_mapping),
5822 sizeof(struct eth_fast_path_rx_cqe) *
5823 NUM_RCQ_BD);
a2fbb9ea 5824
7a9b2557 5825 /* SGE ring */
32626230 5826 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5827 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5828 bnx2x_fp(bp, i, rx_sge_mapping),
5829 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5830 }
a2fbb9ea
ET
5831 /* end of fastpath */
5832
5833 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5834 sizeof(struct host_def_status_block));
a2fbb9ea
ET
5835
5836 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5837 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
5838
5839#ifdef BCM_ISCSI
5840 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5841 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5842 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5843 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5844#endif
7a9b2557 5845 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
5846
5847#undef BNX2X_PCI_FREE
5848#undef BNX2X_FREE
5849}
5850
5851static int bnx2x_alloc_mem(struct bnx2x *bp)
5852{
5853
5854#define BNX2X_PCI_ALLOC(x, y, size) \
5855 do { \
5856 x = pci_alloc_consistent(bp->pdev, size, y); \
5857 if (x == NULL) \
5858 goto alloc_mem_err; \
5859 memset(x, 0, size); \
5860 } while (0)
5861
5862#define BNX2X_ALLOC(x, size) \
5863 do { \
5864 x = vmalloc(size); \
5865 if (x == NULL) \
5866 goto alloc_mem_err; \
5867 memset(x, 0, size); \
5868 } while (0)
5869
5870 int i;
5871
5872 /* fastpath */
a2fbb9ea
ET
5873 for_each_queue(bp, i) {
5874 bnx2x_fp(bp, i, bp) = bp;
5875
5876 /* Status blocks */
5877 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5878 &bnx2x_fp(bp, i, status_blk_mapping),
5879 sizeof(struct host_status_block) +
5880 sizeof(struct eth_tx_db_data));
5881
5882 bnx2x_fp(bp, i, hw_tx_prods) =
5883 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5884
5885 bnx2x_fp(bp, i, tx_prods_mapping) =
5886 bnx2x_fp(bp, i, status_blk_mapping) +
5887 sizeof(struct host_status_block);
5888
5889 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5890 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5891 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5892 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5893 &bnx2x_fp(bp, i, tx_desc_mapping),
5894 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5895
5896 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5897 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5898 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5899 &bnx2x_fp(bp, i, rx_desc_mapping),
5900 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5901
5902 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5903 &bnx2x_fp(bp, i, rx_comp_mapping),
5904 sizeof(struct eth_fast_path_rx_cqe) *
5905 NUM_RCQ_BD);
5906
7a9b2557
VZ
5907 /* SGE ring */
5908 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5909 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5910 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5911 &bnx2x_fp(bp, i, rx_sge_mapping),
5912 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea
ET
5913 }
5914 /* end of fastpath */
5915
5916 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5917 sizeof(struct host_def_status_block));
5918
5919 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5920 sizeof(struct bnx2x_slowpath));
5921
5922#ifdef BCM_ISCSI
5923 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5924
5925 /* Initialize T1 */
5926 for (i = 0; i < 64*1024; i += 64) {
5927 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5928 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5929 }
5930
5931 /* allocate searcher T2 table
5932 we allocate 1/4 of alloc num for T2
5933 (which is not entered into the ILT) */
5934 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5935
5936 /* Initialize T2 */
5937 for (i = 0; i < 16*1024; i += 64)
5938 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
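 /* the last qword (offset 56) of each 64-byte T2 entry holds the
    physical address of the next entry, forming a linked free list
    for the searcher */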
5939
c14423fe 5940 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
5941 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5942
5943 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5944 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5945
5946 /* QM queues (128*MAX_CONN) */
5947 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5948#endif
5949
5950 /* Slow path ring */
5951 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5952
5953 return 0;
5954
5955alloc_mem_err:
5956 bnx2x_free_mem(bp);
5957 return -ENOMEM;
5958
5959#undef BNX2X_PCI_ALLOC
5960#undef BNX2X_ALLOC
5961}
5962
5963static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5964{
5965 int i;
5966
5967 for_each_queue(bp, i) {
5968 struct bnx2x_fastpath *fp = &bp->fp[i];
5969
5970 u16 bd_cons = fp->tx_bd_cons;
5971 u16 sw_prod = fp->tx_pkt_prod;
5972 u16 sw_cons = fp->tx_pkt_cons;
5973
a2fbb9ea
ET
5974 while (sw_cons != sw_prod) {
5975 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5976 sw_cons++;
5977 }
5978 }
5979}
5980
5981static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5982{
5983 int i, j;
5984
5985 for_each_queue(bp, j) {
5986 struct bnx2x_fastpath *fp = &bp->fp[j];
5987
a2fbb9ea
ET
5988 for (i = 0; i < NUM_RX_BD; i++) {
5989 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5990 struct sk_buff *skb = rx_buf->skb;
5991
5992 if (skb == NULL)
5993 continue;
5994
5995 pci_unmap_single(bp->pdev,
5996 pci_unmap_addr(rx_buf, mapping),
437cf2f1 5997 bp->rx_buf_size,
a2fbb9ea
ET
5998 PCI_DMA_FROMDEVICE);
5999
6000 rx_buf->skb = NULL;
6001 dev_kfree_skb(skb);
6002 }
7a9b2557 6003 if (!fp->disable_tpa)
32626230
EG
6004 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6005 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6006 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6007 }
6008}
6009
6010static void bnx2x_free_skbs(struct bnx2x *bp)
6011{
6012 bnx2x_free_tx_skbs(bp);
6013 bnx2x_free_rx_skbs(bp);
6014}
6015
6016static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6017{
34f80b04 6018 int i, offset = 1;
a2fbb9ea
ET
6019
6020 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6021 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6022 bp->msix_table[0].vector);
6023
6024 for_each_queue(bp, i) {
c14423fe 6025 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6026 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6027 bnx2x_fp(bp, i, state));
6028
228241eb
ET
6029 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6030 BNX2X_ERR("IRQ of fp #%d being freed while "
6031 "state != closed\n", i);
a2fbb9ea 6032
34f80b04 6033 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6034 }
a2fbb9ea
ET
6035}
6036
6037static void bnx2x_free_irq(struct bnx2x *bp)
6038{
a2fbb9ea 6039 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6040 bnx2x_free_msix_irqs(bp);
6041 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6042 bp->flags &= ~USING_MSIX_FLAG;
6043
6044 } else
6045 free_irq(bp->pdev->irq, bp->dev);
6046}
6047
6048static int bnx2x_enable_msix(struct bnx2x *bp)
6049{
34f80b04 6050 int i, rc, offset;
a2fbb9ea
ET
6051
6052 bp->msix_table[0].entry = 0;
34f80b04
EG
6053 offset = 1;
6054 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6055
34f80b04
EG
6056 for_each_queue(bp, i) {
6057 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6058
34f80b04
EG
6059 bp->msix_table[i + offset].entry = igu_vec;
6060 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6061 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6062 }
6063
34f80b04
EG
6064 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6065 bp->num_queues + offset);
6066 if (rc) {
6067 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6068 return -1;
6069 }
a2fbb9ea
ET
6070 bp->flags |= USING_MSIX_FLAG;
6071
6072 return 0;
a2fbb9ea
ET
6073}
6074
a2fbb9ea
ET
6075static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6076{
34f80b04 6077 int i, rc, offset = 1;
a2fbb9ea 6078
a2fbb9ea
ET
6079 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6080 bp->dev->name, bp->dev);
a2fbb9ea
ET
6081 if (rc) {
6082 BNX2X_ERR("request sp irq failed\n");
6083 return -EBUSY;
6084 }
6085
6086 for_each_queue(bp, i) {
34f80b04 6087 rc = request_irq(bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6088 bnx2x_msix_fp_int, 0,
6089 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6090 if (rc) {
3196a88a
EG
6091 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6092 i + offset, -rc);
a2fbb9ea
ET
6093 bnx2x_free_msix_irqs(bp);
6094 return -EBUSY;
6095 }
6096
6097 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6098 }
6099
6100 return 0;
a2fbb9ea
ET
6101}
6102
6103static int bnx2x_req_irq(struct bnx2x *bp)
6104{
34f80b04 6105 int rc;
a2fbb9ea 6106
34f80b04
EG
6107 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6108 bp->dev->name, bp->dev);
a2fbb9ea
ET
6109 if (!rc)
6110 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6111
6112 return rc;
a2fbb9ea
ET
6113}
6114
65abd74d
YG
6115static void bnx2x_napi_enable(struct bnx2x *bp)
6116{
6117 int i;
6118
6119 for_each_queue(bp, i)
6120 napi_enable(&bnx2x_fp(bp, i, napi));
6121}
6122
6123static void bnx2x_napi_disable(struct bnx2x *bp)
6124{
6125 int i;
6126
6127 for_each_queue(bp, i)
6128 napi_disable(&bnx2x_fp(bp, i, napi));
6129}
6130
6131static void bnx2x_netif_start(struct bnx2x *bp)
6132{
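 /* intr_sem is presumably raised by the stop path
    (bnx2x_int_disable_sync()); only the call that brings it back to
    zero re-enables NAPI and interrupts */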
6133 if (atomic_dec_and_test(&bp->intr_sem)) {
6134 if (netif_running(bp->dev)) {
6135 if (bp->state == BNX2X_STATE_OPEN)
6136 netif_wake_queue(bp->dev);
6137 bnx2x_napi_enable(bp);
6138 bnx2x_int_enable(bp);
6139 }
6140 }
6141}
6142
f8ef6e44 6143static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6144{
f8ef6e44 6145 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6146 bnx2x_napi_disable(bp);
65abd74d 6147 if (netif_running(bp->dev)) {
65abd74d
YG
6148 netif_tx_disable(bp->dev);
6149 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6150 }
6151}
6152
a2fbb9ea
ET
6153/*
6154 * Init service functions
6155 */
6156
3101c2bc 6157static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6158{
6159 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6160 int port = BP_PORT(bp);
a2fbb9ea
ET
6161
6162 /* CAM allocation
6163 * unicasts 0-31:port0 32-63:port1
6164 * multicast 64-127:port0 128-191:port1
6165 */
6166 config->hdr.length_6b = 2;
af246401 6167 config->hdr.offset = port ? 32 : 0;
34f80b04 6168 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6169 config->hdr.reserved1 = 0;
6170
6171 /* primary MAC */
6172 config->config_table[0].cam_entry.msb_mac_addr =
6173 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6174 config->config_table[0].cam_entry.middle_mac_addr =
6175 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6176 config->config_table[0].cam_entry.lsb_mac_addr =
6177 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6178 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6179 if (set)
6180 config->config_table[0].target_table_entry.flags = 0;
6181 else
6182 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6183 config->config_table[0].target_table_entry.client_id = 0;
6184 config->config_table[0].target_table_entry.vlan_id = 0;
6185
3101c2bc
YG
6186 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6187 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6188 config->config_table[0].cam_entry.msb_mac_addr,
6189 config->config_table[0].cam_entry.middle_mac_addr,
6190 config->config_table[0].cam_entry.lsb_mac_addr);
6191
6192 /* broadcast */
6193 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6194 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6195 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6196 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6197 if (set)
6198 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6199 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6200 else
6201 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6202 config->config_table[1].target_table_entry.client_id = 0;
6203 config->config_table[1].target_table_entry.vlan_id = 0;
6204
6205 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6206 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6207 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6208}
6209
3101c2bc 6210static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6211{
6212 struct mac_configuration_cmd_e1h *config =
6213 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6214
3101c2bc 6215 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04
EG
6216 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6217 return;
6218 }
6219
6220 /* CAM allocation for E1H
6221 * unicasts: by func number
6222 * multicast: 20+FUNC*20, 20 each
6223 */
6224 config->hdr.length_6b = 1;
6225 config->hdr.offset = BP_FUNC(bp);
6226 config->hdr.client_id = BP_CL_ID(bp);
6227 config->hdr.reserved1 = 0;
6228
6229 /* primary MAC */
6230 config->config_table[0].msb_mac_addr =
6231 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6232 config->config_table[0].middle_mac_addr =
6233 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6234 config->config_table[0].lsb_mac_addr =
6235 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6236 config->config_table[0].client_id = BP_L_ID(bp);
6237 config->config_table[0].vlan_id = 0;
6238 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6239 if (set)
6240 config->config_table[0].flags = BP_PORT(bp);
6241 else
6242 config->config_table[0].flags =
6243 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6244
3101c2bc
YG
6245 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6246 (set ? "setting" : "clearing"),
34f80b04
EG
6247 config->config_table[0].msb_mac_addr,
6248 config->config_table[0].middle_mac_addr,
6249 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6250
6251 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6252 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6253 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6254}
6255
a2fbb9ea
ET
6256static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6257 int *state_p, int poll)
6258{
6259 /* can take a while if any port is running */
34f80b04 6260 int cnt = 500;
a2fbb9ea 6261
c14423fe
ET
6262 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6263 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6264
6265 might_sleep();
34f80b04 6266 while (cnt--) {
a2fbb9ea
ET
6267 if (poll) {
6268 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6269 /* if index is different from 0
6270 * the reply for some commands will
3101c2bc 6271 * be on the non-default queue
a2fbb9ea
ET
6272 */
6273 if (idx)
6274 bnx2x_rx_int(&bp->fp[idx], 10);
6275 }
a2fbb9ea 6276
3101c2bc 6277 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6278 if (*state_p == state)
a2fbb9ea
ET
6279 return 0;
6280
a2fbb9ea 6281 msleep(1);
a2fbb9ea
ET
6282 }
6283
a2fbb9ea 6284 /* timeout! */
49d66772
ET
6285 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6286 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6287#ifdef BNX2X_STOP_ON_ERROR
6288 bnx2x_panic();
6289#endif
a2fbb9ea 6290
49d66772 6291 return -EBUSY;
a2fbb9ea
ET
6292}
6293
6294static int bnx2x_setup_leading(struct bnx2x *bp)
6295{
34f80b04 6296 int rc;
a2fbb9ea 6297
c14423fe 6298 /* reset IGU state */
34f80b04 6299 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6300
6301 /* SETUP ramrod */
6302 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6303
34f80b04
EG
6304 /* Wait for completion */
6305 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6306
34f80b04 6307 return rc;
a2fbb9ea
ET
6308}
6309
6310static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6311{
a2fbb9ea 6312 /* reset IGU state */
34f80b04 6313 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6314
228241eb 6315 /* SETUP ramrod */
a2fbb9ea
ET
6316 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6317 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6318
6319 /* Wait for completion */
6320 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6321 &(bp->fp[index].state), 0);
a2fbb9ea
ET
6322}
6323
a2fbb9ea
ET
6324static int bnx2x_poll(struct napi_struct *napi, int budget);
6325static void bnx2x_set_rx_mode(struct net_device *dev);
6326
34f80b04
EG
6327/* must be called with rtnl_lock */
6328static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6329{
228241eb 6330 u32 load_code;
2dfe0e1f 6331 int i, rc = 0;
34f80b04
EG
6332#ifdef BNX2X_STOP_ON_ERROR
6333 if (unlikely(bp->panic))
6334 return -EPERM;
6335#endif
a2fbb9ea
ET
6336
6337 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6338
34f80b04
EG
6339 if (use_inta) {
6340 bp->num_queues = 1;
6341
6342 } else {
6343 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6344 /* user requested number */
6345 bp->num_queues = use_multi;
6346
6347 else if (use_multi)
6348 bp->num_queues = min_t(u32, num_online_cpus(),
6349 BP_MAX_QUEUES(bp));
6350 else
a2fbb9ea 6351 bp->num_queues = 1;
34f80b04 6352
2dfe0e1f
EG
6353 DP(NETIF_MSG_IFUP,
6354 "set number of queues to %d\n", bp->num_queues);
6355
6356 /* if we can't use MSI-X we only need one fp,
6357 * so try to enable MSI-X with the requested number of fp's
6358 * and fallback to MSI or legacy INTx with one fp
6359 */
6360 rc = bnx2x_enable_msix(bp);
6361 if (rc) {
34f80b04
EG
6362 /* failed to enable MSI-X */
6363 bp->num_queues = 1;
6364 if (use_multi)
6365 BNX2X_ERR("Multi requested but failed"
6366 " to enable MSI-X\n");
a2fbb9ea
ET
6367 }
6368 }
c14423fe 6369
a2fbb9ea
ET
6370 if (bnx2x_alloc_mem(bp))
6371 return -ENOMEM;
6372
7a9b2557
VZ
6373 for_each_queue(bp, i)
6374 bnx2x_fp(bp, i, disable_tpa) =
6375 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6376
2dfe0e1f
EG
6377 for_each_queue(bp, i)
6378 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6379 bnx2x_poll, 128);
6380
6381#ifdef BNX2X_STOP_ON_ERROR
6382 for_each_queue(bp, i) {
6383 struct bnx2x_fastpath *fp = &bp->fp[i];
6384
6385 fp->poll_no_work = 0;
6386 fp->poll_calls = 0;
6387 fp->poll_max_calls = 0;
6388 fp->poll_complete = 0;
6389 fp->poll_exit = 0;
6390 }
6391#endif
6392 bnx2x_napi_enable(bp);
6393
34f80b04
EG
6394 if (bp->flags & USING_MSIX_FLAG) {
6395 rc = bnx2x_req_msix_irqs(bp);
6396 if (rc) {
6397 pci_disable_msix(bp->pdev);
2dfe0e1f 6398 goto load_error1;
34f80b04 6399 }
2dfe0e1f 6400 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
34f80b04
EG
6401 } else {
6402 bnx2x_ack_int(bp);
6403 rc = bnx2x_req_irq(bp);
6404 if (rc) {
2dfe0e1f
EG
6405 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6406 goto load_error1;
a2fbb9ea
ET
6407 }
6408 }
6409
2dfe0e1f
EG
6410 /* Send LOAD_REQUEST command to MCP
6411 Returns the type of LOAD command:
6412 if this is the first port to be initialized,
6413 the common blocks are initialized as well; otherwise they are not
6414 */
6415 if (!BP_NOMCP(bp)) {
6416 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6417 if (!load_code) {
6418 BNX2X_ERR("MCP response failure, aborting\n");
6419 rc = -EBUSY;
6420 goto load_error2;
6421 }
6422 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6423 rc = -EBUSY; /* other port in diagnostic mode */
6424 goto load_error2;
6425 }
6426
6427 } else {
6428 int port = BP_PORT(bp);
6429
6430 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6431 load_count[0], load_count[1], load_count[2]);
6432 load_count[0]++;
6433 load_count[1 + port]++;
6434 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6435 load_count[0], load_count[1], load_count[2]);
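 /* mimic the MCP bookkeeping: load_count[0] counts loads globally,
    load_count[1 + port] per port; the first global load initializes
    the common blocks, the first load on a port the port blocks,
    anything else only the function */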
6436 if (load_count[0] == 1)
6437 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6438 else if (load_count[1 + port] == 1)
6439 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6440 else
6441 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6442 }
6443
6444 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6445 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6446 bp->port.pmf = 1;
6447 else
6448 bp->port.pmf = 0;
6449 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6450
a2fbb9ea 6451 /* Initialize HW */
34f80b04
EG
6452 rc = bnx2x_init_hw(bp, load_code);
6453 if (rc) {
a2fbb9ea 6454 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6455 goto load_error2;
a2fbb9ea
ET
6456 }
6457
a2fbb9ea 6458 /* Setup NIC internals and enable interrupts */
471de716 6459 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6460
6461 /* Send LOAD_DONE command to MCP */
34f80b04 6462 if (!BP_NOMCP(bp)) {
228241eb
ET
6463 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6464 if (!load_code) {
da5a662a 6465 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6466 rc = -EBUSY;
2dfe0e1f 6467 goto load_error3;
a2fbb9ea
ET
6468 }
6469 }
6470
6471 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6472
34f80b04
EG
6473 rc = bnx2x_setup_leading(bp);
6474 if (rc) {
da5a662a 6475 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6476 goto load_error3;
34f80b04 6477 }
a2fbb9ea 6478
34f80b04
EG
6479 if (CHIP_IS_E1H(bp))
6480 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6481 BNX2X_ERR("!!! mf_cfg function disabled\n");
6482 bp->state = BNX2X_STATE_DISABLED;
6483 }
a2fbb9ea 6484
34f80b04
EG
6485 if (bp->state == BNX2X_STATE_OPEN)
6486 for_each_nondefault_queue(bp, i) {
6487 rc = bnx2x_setup_multi(bp, i);
6488 if (rc)
2dfe0e1f 6489 goto load_error3;
34f80b04 6490 }
a2fbb9ea 6491
34f80b04 6492 if (CHIP_IS_E1(bp))
3101c2bc 6493 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6494 else
3101c2bc 6495 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6496
6497 if (bp->port.pmf)
6498 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6499
6500 /* Start fast path */
34f80b04
EG
6501 switch (load_mode) {
6502 case LOAD_NORMAL:
6503 /* Tx queue should only be re-enabled */
6504 netif_wake_queue(bp->dev);
2dfe0e1f 6505 /* Initialize the receive filter. */
34f80b04
EG
6506 bnx2x_set_rx_mode(bp->dev);
6507 break;
6508
6509 case LOAD_OPEN:
a2fbb9ea 6510 netif_start_queue(bp->dev);
2dfe0e1f 6511 /* Initialize the receive filter. */
34f80b04 6512 bnx2x_set_rx_mode(bp->dev);
34f80b04 6513 break;
a2fbb9ea 6514
34f80b04 6515 case LOAD_DIAG:
2dfe0e1f 6516 /* Initialize the receive filter. */
a2fbb9ea 6517 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6518 bp->state = BNX2X_STATE_DIAG;
6519 break;
6520
6521 default:
6522 break;
a2fbb9ea
ET
6523 }
6524
34f80b04
EG
6525 if (!bp->port.pmf)
6526 bnx2x__link_status_update(bp);
6527
a2fbb9ea
ET
6528 /* start the timer */
6529 mod_timer(&bp->timer, jiffies + bp->current_interval);
6530
34f80b04 6531
a2fbb9ea
ET
6532 return 0;
6533
2dfe0e1f
EG
6534load_error3:
6535 bnx2x_int_disable_sync(bp, 1);
6536 if (!BP_NOMCP(bp)) {
6537 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6538 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6539 }
6540 bp->port.pmf = 0;
7a9b2557
VZ
6541 /* Free SKBs, SGEs, TPA pool and driver internals */
6542 bnx2x_free_skbs(bp);
6543 for_each_queue(bp, i)
3196a88a 6544 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6545load_error2:
d1014634
YG
6546 /* Release IRQs */
6547 bnx2x_free_irq(bp);
2dfe0e1f
EG
6548load_error1:
6549 bnx2x_napi_disable(bp);
7cde1c8b
EG
6550 for_each_queue(bp, i)
6551 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6552 bnx2x_free_mem(bp);
6553
6554 /* TBD we really need to reset the chip
6555 if we want to recover from this */
34f80b04 6556 return rc;
a2fbb9ea
ET
6557}
6558
6559static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6560{
a2fbb9ea
ET
6561 int rc;
6562
c14423fe 6563 /* halt the connection */
a2fbb9ea 6564 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
231fd58a 6565 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
a2fbb9ea 6566
34f80b04 6567 /* Wait for completion */
a2fbb9ea 6568 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6569 &(bp->fp[index].state), 1);
c14423fe 6570 if (rc) /* timeout */
a2fbb9ea
ET
6571 return rc;
6572
6573 /* delete cfc entry */
6574 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6575
34f80b04
EG
6576 /* Wait for completion */
6577 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6578 &(bp->fp[index].state), 1);
6579 return rc;
a2fbb9ea
ET
6580}
6581
da5a662a 6582static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6583{
49d66772 6584 u16 dsb_sp_prod_idx;
c14423fe 6585 /* if the other port is handling traffic,
a2fbb9ea 6586 this can take a lot of time */
34f80b04
EG
6587 int cnt = 500;
6588 int rc;
a2fbb9ea
ET
6589
6590 might_sleep();
6591
6592 /* Send HALT ramrod */
6593 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6594 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6595
34f80b04
EG
6596 /* Wait for completion */
6597 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6598 &(bp->fp[0].state), 1);
6599 if (rc) /* timeout */
da5a662a 6600 return rc;
a2fbb9ea 6601
49d66772 6602 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6603
228241eb 6604 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6605 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6606
49d66772 6607 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
6608 we are going to reset the chip anyway
6609 so there is not much to do if this times out
6610 */
34f80b04 6611 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6612 if (!cnt) {
6613 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6614 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6615 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6616#ifdef BNX2X_STOP_ON_ERROR
6617 bnx2x_panic();
da5a662a
VZ
6618#else
6619 rc = -EBUSY;
34f80b04
EG
6620#endif
6621 break;
6622 }
6623 cnt--;
da5a662a 6624 msleep(1);
49d66772
ET
6625 }
6626 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6627 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6628
6629 return rc;
a2fbb9ea
ET
6630}
6631
34f80b04
EG
6632static void bnx2x_reset_func(struct bnx2x *bp)
6633{
6634 int port = BP_PORT(bp);
6635 int func = BP_FUNC(bp);
6636 int base, i;
6637
6638 /* Configure IGU */
6639 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6640 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6641
6642 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6643
6644 /* Clear ILT */
6645 base = FUNC_ILT_BASE(func);
6646 for (i = base; i < base + ILT_PER_FUNC; i++)
6647 bnx2x_ilt_wr(bp, i, 0);
6648}
6649
6650static void bnx2x_reset_port(struct bnx2x *bp)
6651{
6652 int port = BP_PORT(bp);
6653 u32 val;
6654
6655 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6656
6657 /* Do not rcv packets to BRB */
6658 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6659 /* Do not direct rcv packets that are not for MCP to the BRB */
6660 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6661 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6662
6663 /* Configure AEU */
6664 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6665
6666 msleep(100);
6667 /* Check for BRB port occupancy */
6668 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6669 if (val)
6670 DP(NETIF_MSG_IFDOWN,
33471629 6671 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6672
6673 /* TODO: Close Doorbell port? */
6674}
6675
34f80b04
EG
6676static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6677{
6678 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6679 BP_FUNC(bp), reset_code);
6680
6681 switch (reset_code) {
6682 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6683 bnx2x_reset_port(bp);
6684 bnx2x_reset_func(bp);
6685 bnx2x_reset_common(bp);
6686 break;
6687
6688 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6689 bnx2x_reset_port(bp);
6690 bnx2x_reset_func(bp);
6691 break;
6692
6693 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6694 bnx2x_reset_func(bp);
6695 break;
49d66772 6696
34f80b04
EG
6697 default:
6698 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6699 break;
6700 }
6701}
6702
33471629 6703/* must be called with rtnl_lock */
34f80b04 6704static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6705{
da5a662a 6706 int port = BP_PORT(bp);
a2fbb9ea 6707 u32 reset_code = 0;
da5a662a 6708 int i, cnt, rc;
a2fbb9ea
ET
6709
6710 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6711
228241eb
ET
6712 bp->rx_mode = BNX2X_RX_MODE_NONE;
6713 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6714
f8ef6e44 6715 bnx2x_netif_stop(bp, 1);
e94d8af3 6716
34f80b04
EG
6717 del_timer_sync(&bp->timer);
6718 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6719 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6720 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6721
70b9986c
EG
6722 /* Release IRQs */
6723 bnx2x_free_irq(bp);
6724
da5a662a 6725 /* Wait until tx fast path tasks complete */
228241eb
ET
6726 for_each_queue(bp, i) {
6727 struct bnx2x_fastpath *fp = &bp->fp[i];
6728
34f80b04
EG
6729 cnt = 1000;
6730 smp_rmb();
237907c1 6731 while (bnx2x_has_tx_work(fp)) {
da5a662a 6732
65abd74d 6733 bnx2x_tx_int(fp, 1000);
34f80b04
EG
6734 if (!cnt) {
6735 BNX2X_ERR("timeout waiting for queue[%d]\n",
6736 i);
6737#ifdef BNX2X_STOP_ON_ERROR
6738 bnx2x_panic();
6739 return -EBUSY;
6740#else
6741 break;
6742#endif
6743 }
6744 cnt--;
da5a662a 6745 msleep(1);
34f80b04
EG
6746 smp_rmb();
6747 }
228241eb 6748 }
da5a662a
VZ
6749 /* Give HW time to discard old tx messages */
6750 msleep(1);
a2fbb9ea 6751
3101c2bc
YG
6752 if (CHIP_IS_E1(bp)) {
6753 struct mac_configuration_cmd *config =
6754 bnx2x_sp(bp, mcast_config);
6755
6756 bnx2x_set_mac_addr_e1(bp, 0);
6757
6758 for (i = 0; i < config->hdr.length_6b; i++)
6759 CAM_INVALIDATE(config->config_table[i]);
6760
6761 config->hdr.length_6b = i;
6762 if (CHIP_REV_IS_SLOW(bp))
6763 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6764 else
6765 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6766 config->hdr.client_id = BP_CL_ID(bp);
6767 config->hdr.reserved1 = 0;
6768
6769 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6770 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6771 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6772
6773 } else { /* E1H */
6774 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6775
6776 bnx2x_set_mac_addr_e1h(bp, 0);
6777
6778 for (i = 0; i < MC_HASH_SIZE; i++)
6779 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6780 }
6781
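/* Choose what to request from the MCP: a plain unload, MCP-managed
 * WoL, or driver-armed WoL. In the last case the MAC is programmed
 * into one of the EMAC match entries 1-4 (indexed by E1HVN) before
 * requesting UNLOAD_REQ_WOL_EN. */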
6782 if (unload_mode == UNLOAD_NORMAL)
6783 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6784
6785 else if (bp->flags & NO_WOL_FLAG) {
6786 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6787 if (CHIP_IS_E1H(bp))
6788 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6789
6790 } else if (bp->wol) {
6791 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6792 u8 *mac_addr = bp->dev->dev_addr;
6793 u32 val;
6794 /* The mac address is written to entries 1-4 to
6795 preserve entry 0 which is used by the PMF */
6796 u8 entry = (BP_E1HVN(bp) + 1)*8;
6797
6798 val = (mac_addr[0] << 8) | mac_addr[1];
6799 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6800
6801 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6802 (mac_addr[4] << 8) | mac_addr[5];
6803 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6804
6805 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6806
6807 } else
6808 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6809
34f80b04 6810 /* Close multi and leading connections;
6811 completions for the ramrods are collected in a synchronous way */
6812 for_each_nondefault_queue(bp, i)
6813 if (bnx2x_stop_multi(bp, i))
228241eb 6814 goto unload_error;
a2fbb9ea 6815
6816 rc = bnx2x_stop_leading(bp);
6817 if (rc) {
34f80b04 6818 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6819#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6820 return -EBUSY;
6821#else
6822 goto unload_error;
34f80b04 6823#endif
6824 }
6825
6826unload_error:
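/* With an MCP the firmware aggregates the per-function unload
 * requests and replies with the widest reset scope still required;
 * without one the driver mimics that with the global load_count[]
 * (0 - common, 1/2 - per port) and derives the scope itself. */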
34f80b04 6827 if (!BP_NOMCP(bp))
228241eb 6828 reset_code = bnx2x_fw_command(bp, reset_code);
6829 else {
6830 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6831 load_count[0], load_count[1], load_count[2]);
6832 load_count[0]--;
da5a662a 6833 load_count[1 + port]--;
6834 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6835 load_count[0], load_count[1], load_count[2]);
6836 if (load_count[0] == 0)
6837 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6838 else if (load_count[1 + port] == 0)
6839 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6840 else
6841 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6842 }
a2fbb9ea 6843
6844 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6845 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6846 bnx2x__link_reset(bp);
6847
6848 /* Reset the chip */
228241eb 6849 bnx2x_reset_chip(bp, reset_code);
6850
6851 /* Report UNLOAD_DONE to MCP */
34f80b04 6852 if (!BP_NOMCP(bp))
a2fbb9ea 6853 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6854 bp->port.pmf = 0;
a2fbb9ea 6855
7a9b2557 6856 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6857 bnx2x_free_skbs(bp);
7a9b2557 6858 for_each_queue(bp, i)
3196a88a 6859 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6860 for_each_queue(bp, i)
6861 netif_napi_del(&bnx2x_fp(bp, i, napi));
6862 bnx2x_free_mem(bp);
6863
6864 bp->state = BNX2X_STATE_CLOSED;
228241eb 6865
6866 netif_carrier_off(bp->dev);
6867
6868 return 0;
6869}
6870
6871static void bnx2x_reset_task(struct work_struct *work)
6872{
6873 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6874
6875#ifdef BNX2X_STOP_ON_ERROR
6876 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6877 " so reset not done to allow debug dump,\n"
6878 KERN_ERR " you will need to reboot when done\n");
6879 return;
6880#endif
6881
6882 rtnl_lock();
6883
6884 if (!netif_running(bp->dev))
6885 goto reset_task_exit;
6886
6887 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6888 bnx2x_nic_load(bp, LOAD_NORMAL);
6889
6890reset_task_exit:
6891 rtnl_unlock();
6892}
6893
6894/* end of nic load/unload */
6895
6896/* ethtool_ops */
6897
6898/*
6899 * Init service functions
6900 */
6901
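/* A pre-boot UNDI driver is detected via MISC_REG_UNPREPARED plus
 * the UNDI doorbell CID offset (0x7). If found, request unload for
 * port 0 (and for port 1 when the MCP reply shows the other port is
 * still loaded), reset the chip, bring the NIG back out of reset
 * with the saved port-swap settings and report UNLOAD_DONE, so the
 * normal init path starts from a clean state. */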
6902static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6903{
6904 u32 val;
6905
6906 /* Check if there is any driver already loaded */
6907 val = REG_RD(bp, MISC_REG_UNPREPARED);
6908 if (val == 0x1) {
6909 /* Check if it is the UNDI driver
6910 * UNDI driver initializes CID offset for normal bell to 0x7
6911 */
4a37fb66 6912 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6913 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6914 if (val == 0x7) {
6915 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6916 /* save our func */
34f80b04 6917 int func = BP_FUNC(bp);
6918 u32 swap_en;
6919 u32 swap_val;
34f80b04 6920
6921 /* clear the UNDI indication */
6922 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6923
6924 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6925
6926 /* try unload UNDI on port 0 */
6927 bp->func = 0;
6928 bp->fw_seq =
6929 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6930 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 6931 reset_code = bnx2x_fw_command(bp, reset_code);
6932
6933 /* if UNDI is loaded on the other port */
6934 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6935
6936 /* send "DONE" for previous unload */
6937 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6938
6939 /* unload UNDI on port 1 */
34f80b04 6940 bp->func = 1;
6941 bp->fw_seq =
6942 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6943 DRV_MSG_SEQ_NUMBER_MASK);
6944 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6945
6946 bnx2x_fw_command(bp, reset_code);
6947 }
6948
6949 /* now it's safe to release the lock */
6950 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6951
6952 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6953 HC_REG_CONFIG_0), 0x1000);
6954
6955 /* close input traffic and wait for it */
6956 /* Do not rcv packets to BRB */
6957 REG_WR(bp,
6958 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6959 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6960 /* Do not direct rcv packets that are not for MCP to
6961 * the BRB */
6962 REG_WR(bp,
6963 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6964 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6965 /* clear AEU */
6966 REG_WR(bp,
6967 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6968 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6969 msleep(10);
6970
6971 /* save NIG port swap info */
6972 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6973 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6974 /* reset device */
6975 REG_WR(bp,
6976 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 6977 0xd3ffffff);
6978 REG_WR(bp,
6979 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6980 0x1403);
6981 /* take the NIG out of reset and restore swap values */
6982 REG_WR(bp,
6983 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6984 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6985 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6986 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6987
6988 /* send unload done to the MCP */
6989 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6990
6991 /* restore our func and fw_seq */
6992 bp->func = func;
6993 bp->fw_seq =
6994 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6995 DRV_MSG_SEQ_NUMBER_MASK);
6996
6997 } else
6998 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6999 }
7000}
7001
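/* Chip-wide probe: compose chip_id from the num/rev/metal/bond_id
 * registers, size the NVRAM flash, locate shared memory and verify
 * the MCP validity signature, then read the bootcode version and
 * the PME-in-D3cold (WoL) capability, which only E1HVN 0 may use. */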
7002static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7003{
7004 u32 val, val2, val3, val4, id;
72ce58c3 7005 u16 pmc;
7006
7007 /* Get the chip revision id and number. */
7008 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7009 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7010 id = ((val & 0xffff) << 16);
7011 val = REG_RD(bp, MISC_REG_CHIP_REV);
7012 id |= ((val & 0xf) << 12);
7013 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7014 id |= ((val & 0xff) << 4);
5a40e08e 7015 val = REG_RD(bp, MISC_REG_BOND_ID);
7016 id |= (val & 0xf);
7017 bp->common.chip_id = id;
7018 bp->link_params.chip_id = bp->common.chip_id;
7019 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7020
7021 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7022 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7023 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7024 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7025 bp->common.flash_size, bp->common.flash_size);
7026
7027 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7028 bp->link_params.shmem_base = bp->common.shmem_base;
7029 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7030
7031 if (!bp->common.shmem_base ||
7032 (bp->common.shmem_base < 0xA0000) ||
7033 (bp->common.shmem_base >= 0xC0000)) {
7034 BNX2X_DEV_INFO("MCP not active\n");
7035 bp->flags |= NO_MCP_FLAG;
7036 return;
7037 }
7038
7039 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7040 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7041 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7042 BNX2X_ERR("BAD MCP validity signature\n");
7043
7044 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7045 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7046
7047 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7048 bp->common.hw_config, bp->common.board);
7049
7050 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7051 SHARED_HW_CFG_LED_MODE_MASK) >>
7052 SHARED_HW_CFG_LED_MODE_SHIFT);
7053
7054 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7055 bp->common.bc_ver = val;
7056 BNX2X_DEV_INFO("bc_ver %X\n", val);
7057 if (val < BNX2X_BC_VER) {
7058 /* for now only warn;
7059 * later we might need to enforce this */
7060 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7061 " please upgrade BC\n", BNX2X_BC_VER, val);
7062 }
7063
7064 if (BP_E1HVN(bp) == 0) {
7065 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7066 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7067 } else {
7068 /* no WOL capability for E1HVN != 0 */
7069 bp->flags |= NO_WOL_FLAG;
7070 }
7071 BNX2X_DEV_INFO("%sWoL capable\n",
7072 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7073
7074 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7075 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7076 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7077 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7078
7079 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7080 val, val2, val3, val4);
7081}
7082
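/* Build bp->port.supported from the switch type (1G SerDes vs 10G
 * XGXS) and the external PHY type, then strip every speed that the
 * NVRAM speed_cap_mask does not allow. */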
7083static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7084 u32 switch_cfg)
a2fbb9ea 7085{
34f80b04 7086 int port = BP_PORT(bp);
7087 u32 ext_phy_type;
7088
7089 switch (switch_cfg) {
7090 case SWITCH_CFG_1G:
7091 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7092
7093 ext_phy_type =
7094 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7095 switch (ext_phy_type) {
7096 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7097 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7098 ext_phy_type);
7099
7100 bp->port.supported |= (SUPPORTED_10baseT_Half |
7101 SUPPORTED_10baseT_Full |
7102 SUPPORTED_100baseT_Half |
7103 SUPPORTED_100baseT_Full |
7104 SUPPORTED_1000baseT_Full |
7105 SUPPORTED_2500baseX_Full |
7106 SUPPORTED_TP |
7107 SUPPORTED_FIBRE |
7108 SUPPORTED_Autoneg |
7109 SUPPORTED_Pause |
7110 SUPPORTED_Asym_Pause);
7111 break;
7112
7113 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7114 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7115 ext_phy_type);
7116
7117 bp->port.supported |= (SUPPORTED_10baseT_Half |
7118 SUPPORTED_10baseT_Full |
7119 SUPPORTED_100baseT_Half |
7120 SUPPORTED_100baseT_Full |
7121 SUPPORTED_1000baseT_Full |
7122 SUPPORTED_TP |
7123 SUPPORTED_FIBRE |
7124 SUPPORTED_Autoneg |
7125 SUPPORTED_Pause |
7126 SUPPORTED_Asym_Pause);
7127 break;
7128
7129 default:
7130 BNX2X_ERR("NVRAM config error. "
7131 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7132 bp->link_params.ext_phy_config);
7133 return;
7134 }
7135
7136 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7137 port*0x10);
7138 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7139 break;
7140
7141 case SWITCH_CFG_10G:
7142 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7143
7144 ext_phy_type =
7145 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7146 switch (ext_phy_type) {
7147 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7148 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7149 ext_phy_type);
7150
7151 bp->port.supported |= (SUPPORTED_10baseT_Half |
7152 SUPPORTED_10baseT_Full |
7153 SUPPORTED_100baseT_Half |
7154 SUPPORTED_100baseT_Full |
7155 SUPPORTED_1000baseT_Full |
7156 SUPPORTED_2500baseX_Full |
7157 SUPPORTED_10000baseT_Full |
7158 SUPPORTED_TP |
7159 SUPPORTED_FIBRE |
7160 SUPPORTED_Autoneg |
7161 SUPPORTED_Pause |
7162 SUPPORTED_Asym_Pause);
7163 break;
7164
7165 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7166 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7167 ext_phy_type);
f1410647 7168
7169 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7170 SUPPORTED_FIBRE |
7171 SUPPORTED_Pause |
7172 SUPPORTED_Asym_Pause);
7173 break;
7174
a2fbb9ea 7175 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7176 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7177 ext_phy_type);
7178
7179 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7180 SUPPORTED_1000baseT_Full |
7181 SUPPORTED_FIBRE |
7182 SUPPORTED_Pause |
7183 SUPPORTED_Asym_Pause);
7184 break;
7185
7186 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7187 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7188 ext_phy_type);
7189
7190 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7191 SUPPORTED_1000baseT_Full |
7192 SUPPORTED_FIBRE |
7193 SUPPORTED_Autoneg |
7194 SUPPORTED_Pause |
7195 SUPPORTED_Asym_Pause);
7196 break;
7197
7198 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7199 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7200 ext_phy_type);
7201
7202 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7203 SUPPORTED_2500baseX_Full |
7204 SUPPORTED_1000baseT_Full |
7205 SUPPORTED_FIBRE |
7206 SUPPORTED_Autoneg |
7207 SUPPORTED_Pause |
7208 SUPPORTED_Asym_Pause);
7209 break;
7210
7211 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7212 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7213 ext_phy_type);
7214
7215 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7216 SUPPORTED_TP |
7217 SUPPORTED_Autoneg |
7218 SUPPORTED_Pause |
7219 SUPPORTED_Asym_Pause);
7220 break;
7221
7222 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7223 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7224 bp->link_params.ext_phy_config);
7225 break;
7226
7227 default:
7228 BNX2X_ERR("NVRAM config error. "
7229 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7230 bp->link_params.ext_phy_config);
7231 return;
7232 }
7233
7234 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7235 port*0x18);
7236 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7237
7238 break;
7239
7240 default:
7241 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7242 bp->port.link_config);
7243 return;
7244 }
34f80b04 7245 bp->link_params.phy_addr = bp->port.phy_addr;
7246
7247 /* mask what we support according to speed_cap_mask */
7248 if (!(bp->link_params.speed_cap_mask &
7249 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7250 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7251
7252 if (!(bp->link_params.speed_cap_mask &
7253 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7254 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7255
7256 if (!(bp->link_params.speed_cap_mask &
7257 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7258 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7259
7260 if (!(bp->link_params.speed_cap_mask &
7261 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7262 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7263
7264 if (!(bp->link_params.speed_cap_mask &
7265 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7266 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7267 SUPPORTED_1000baseT_Full);
a2fbb9ea 7268
7269 if (!(bp->link_params.speed_cap_mask &
7270 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7271 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7272
7273 if (!(bp->link_params.speed_cap_mask &
7274 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7275 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7276
34f80b04 7277 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7278}
7279
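/* Translate the NVRAM link_config into req_line_speed/req_duplex
 * and the advertising mask, validating each forced speed against
 * bp->port.supported; an unknown value falls back to autoneg. */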
34f80b04 7280static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7281{
c18487ee 7282 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7283
34f80b04 7284 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7285 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7286 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7287 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7288 bp->port.advertising = bp->port.supported;
a2fbb9ea 7289 } else {
7290 u32 ext_phy_type =
7291 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7292
7293 if ((ext_phy_type ==
7294 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7295 (ext_phy_type ==
7296 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7297 /* force 10G, no AN */
c18487ee 7298 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7299 bp->port.advertising =
7300 (ADVERTISED_10000baseT_Full |
7301 ADVERTISED_FIBRE);
7302 break;
7303 }
7304 BNX2X_ERR("NVRAM config error. "
7305 "Invalid link_config 0x%x"
7306 " Autoneg not supported\n",
34f80b04 7307 bp->port.link_config);
7308 return;
7309 }
7310 break;
7311
7312 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7313 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7314 bp->link_params.req_line_speed = SPEED_10;
7315 bp->port.advertising = (ADVERTISED_10baseT_Full |
7316 ADVERTISED_TP);
7317 } else {
7318 BNX2X_ERR("NVRAM config error. "
7319 "Invalid link_config 0x%x"
7320 " speed_cap_mask 0x%x\n",
34f80b04 7321 bp->port.link_config,
c18487ee 7322 bp->link_params.speed_cap_mask);
7323 return;
7324 }
7325 break;
7326
7327 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7328 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7329 bp->link_params.req_line_speed = SPEED_10;
7330 bp->link_params.req_duplex = DUPLEX_HALF;
7331 bp->port.advertising = (ADVERTISED_10baseT_Half |
7332 ADVERTISED_TP);
7333 } else {
7334 BNX2X_ERR("NVRAM config error. "
7335 "Invalid link_config 0x%x"
7336 " speed_cap_mask 0x%x\n",
34f80b04 7337 bp->port.link_config,
c18487ee 7338 bp->link_params.speed_cap_mask);
7339 return;
7340 }
7341 break;
7342
7343 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7344 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7345 bp->link_params.req_line_speed = SPEED_100;
7346 bp->port.advertising = (ADVERTISED_100baseT_Full |
7347 ADVERTISED_TP);
7348 } else {
7349 BNX2X_ERR("NVRAM config error. "
7350 "Invalid link_config 0x%x"
7351 " speed_cap_mask 0x%x\n",
34f80b04 7352 bp->port.link_config,
c18487ee 7353 bp->link_params.speed_cap_mask);
7354 return;
7355 }
7356 break;
7357
7358 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7359 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7360 bp->link_params.req_line_speed = SPEED_100;
7361 bp->link_params.req_duplex = DUPLEX_HALF;
7362 bp->port.advertising = (ADVERTISED_100baseT_Half |
7363 ADVERTISED_TP);
7364 } else {
7365 BNX2X_ERR("NVRAM config error. "
7366 "Invalid link_config 0x%x"
7367 " speed_cap_mask 0x%x\n",
34f80b04 7368 bp->port.link_config,
c18487ee 7369 bp->link_params.speed_cap_mask);
7370 return;
7371 }
7372 break;
7373
7374 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7375 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7376 bp->link_params.req_line_speed = SPEED_1000;
7377 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7378 ADVERTISED_TP);
7379 } else {
7380 BNX2X_ERR("NVRAM config error. "
7381 "Invalid link_config 0x%x"
7382 " speed_cap_mask 0x%x\n",
34f80b04 7383 bp->port.link_config,
c18487ee 7384 bp->link_params.speed_cap_mask);
7385 return;
7386 }
7387 break;
7388
7389 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7390 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7391 bp->link_params.req_line_speed = SPEED_2500;
7392 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7393 ADVERTISED_TP);
7394 } else {
7395 BNX2X_ERR("NVRAM config error. "
7396 "Invalid link_config 0x%x"
7397 " speed_cap_mask 0x%x\n",
34f80b04 7398 bp->port.link_config,
c18487ee 7399 bp->link_params.speed_cap_mask);
7400 return;
7401 }
7402 break;
7403
7404 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7405 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7406 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7407 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7408 bp->link_params.req_line_speed = SPEED_10000;
7409 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7410 ADVERTISED_FIBRE);
7411 } else {
7412 BNX2X_ERR("NVRAM config error. "
7413 "Invalid link_config 0x%x"
7414 " speed_cap_mask 0x%x\n",
34f80b04 7415 bp->port.link_config,
c18487ee 7416 bp->link_params.speed_cap_mask);
7417 return;
7418 }
7419 break;
7420
7421 default:
7422 BNX2X_ERR("NVRAM config error. "
7423 "BAD link speed link_config 0x%x\n",
34f80b04 7424 bp->port.link_config);
c18487ee 7425 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7426 bp->port.advertising = bp->port.supported;
7427 break;
7428 }
a2fbb9ea 7429
7430 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7431 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7432 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7433 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7434 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7435
c18487ee 7436 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7437 " advertising 0x%x\n",
7438 bp->link_params.req_line_speed,
7439 bp->link_params.req_duplex,
34f80b04 7440 bp->link_params.req_flow_ctrl, bp->port.advertising);
7441}
7442
34f80b04 7443static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7444{
7445 int port = BP_PORT(bp);
7446 u32 val, val2;
a2fbb9ea 7447
c18487ee 7448 bp->link_params.bp = bp;
34f80b04 7449 bp->link_params.port = port;
c18487ee 7450
c18487ee 7451 bp->link_params.serdes_config =
f1410647 7452 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7453 bp->link_params.lane_config =
a2fbb9ea 7454 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7455 bp->link_params.ext_phy_config =
7456 SHMEM_RD(bp,
7457 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7458 bp->link_params.speed_cap_mask =
7459 SHMEM_RD(bp,
7460 dev_info.port_hw_config[port].speed_capability_mask);
7461
34f80b04 7462 bp->port.link_config =
7463 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7464
7465 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7466 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7467 " link_config 0x%08x\n",
7468 bp->link_params.serdes_config,
7469 bp->link_params.lane_config,
7470 bp->link_params.ext_phy_config,
34f80b04 7471 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7472
34f80b04 7473 bp->link_params.switch_cfg = (bp->port.link_config &
7474 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7475 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7476
7477 bnx2x_link_settings_requested(bp);
7478
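/* The port MAC address lives in shmem as two 32-bit words: mac_upper
 * holds bytes 0-1 and mac_lower bytes 2-5, most significant first. */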
7479 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7480 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7481 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7482 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7483 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7484 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7485 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7486 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7487 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7488 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7489}
7490
7491static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7492{
7493 int func = BP_FUNC(bp);
7494 u32 val, val2;
7495 int rc = 0;
a2fbb9ea 7496
34f80b04 7497 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7498
7499 bp->e1hov = 0;
7500 bp->e1hmf = 0;
7501 if (CHIP_IS_E1H(bp)) {
7502 bp->mf_config =
7503 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7504
7505 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7506 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7507 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7508
7509 bp->e1hov = val;
7510 bp->e1hmf = 1;
7511 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7512 "(0x%04x)\n",
7513 func, bp->e1hov, bp->e1hov);
7514 } else {
7515 BNX2X_DEV_INFO("Single function mode\n");
7516 if (BP_E1HVN(bp)) {
7517 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7518 " aborting\n", func);
7519 rc = -EPERM;
7520 }
7521 }
7522 }
a2fbb9ea 7523
7524 if (!BP_NOMCP(bp)) {
7525 bnx2x_get_port_hwinfo(bp);
7526
7527 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7528 DRV_MSG_SEQ_NUMBER_MASK);
7529 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7530 }
7531
7532 if (IS_E1HMF(bp)) {
7533 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7534 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7535 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7536 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7537 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7538 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7539 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7540 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7541 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7542 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7543 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7544 ETH_ALEN);
7545 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7546 ETH_ALEN);
a2fbb9ea 7547 }
7548
7549 return rc;
7550 }
7551
7552 if (BP_NOMCP(bp)) {
7553 /* only supposed to happen on emulation/FPGA */
33471629 7554 BNX2X_ERR("warning random MAC workaround active\n");
7555 random_ether_addr(bp->dev->dev_addr);
7556 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7557 }
a2fbb9ea 7558
7559 return rc;
7560}
7561
7562static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7563{
7564 int func = BP_FUNC(bp);
7565 int rc;
7566
7567 /* Disable interrupt handling until HW is initialized */
7568 atomic_set(&bp->intr_sem, 1);
7569
34f80b04 7570 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7571
1cf167f2 7572 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7573 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7574
7575 rc = bnx2x_get_hwinfo(bp);
7576
7577 /* need to reset chip if undi was active */
7578 if (!BP_NOMCP(bp))
7579 bnx2x_undi_unload(bp);
7580
7581 if (CHIP_REV_IS_FPGA(bp))
7582 printk(KERN_ERR PFX "FPGA detected\n");
7583
7584 if (BP_NOMCP(bp) && (func == 0))
7585 printk(KERN_ERR PFX
7586 "MCP disabled, must load devices in order!\n");
7587
7588 /* Set TPA flags */
7589 if (disable_tpa) {
7590 bp->flags &= ~TPA_ENABLE_FLAG;
7591 bp->dev->features &= ~NETIF_F_LRO;
7592 } else {
7593 bp->flags |= TPA_ENABLE_FLAG;
7594 bp->dev->features |= NETIF_F_LRO;
7595 }
7596
7597
7598 bp->tx_ring_size = MAX_TX_AVAIL;
7599 bp->rx_ring_size = MAX_RX_AVAIL;
7600
7601 bp->rx_csum = 1;
7602 bp->rx_offset = 0;
7603
7604 bp->tx_ticks = 50;
7605 bp->rx_ticks = 25;
7606
7607 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7608 bp->current_interval = (poll ? poll : bp->timer_interval);
7609
7610 init_timer(&bp->timer);
7611 bp->timer.expires = jiffies + bp->current_interval;
7612 bp->timer.data = (unsigned long) bp;
7613 bp->timer.function = bnx2x_timer;
7614
7615 return rc;
7616}
7617
7618/*
7619 * ethtool service functions
7620 */
7621
7622/* All ethtool functions called with rtnl_lock */
7623
7624static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7625{
7626 struct bnx2x *bp = netdev_priv(dev);
7627
7628 cmd->supported = bp->port.supported;
7629 cmd->advertising = bp->port.advertising;
7630
7631 if (netif_carrier_ok(dev)) {
7632 cmd->speed = bp->link_vars.line_speed;
7633 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7634 } else {
7635 cmd->speed = bp->link_params.req_line_speed;
7636 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7637 }
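/* In multi-function mode the reported speed is capped by the VN's
 * configured maximum bandwidth (FUNC_MF_CFG_MAX_BW is in units of
 * 100 Mbps). */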
7638 if (IS_E1HMF(bp)) {
7639 u16 vn_max_rate;
7640
7641 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7642 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7643 if (vn_max_rate < cmd->speed)
7644 cmd->speed = vn_max_rate;
7645 }
a2fbb9ea 7646
7647 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7648 u32 ext_phy_type =
7649 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7650
7651 switch (ext_phy_type) {
7652 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7653 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7654 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7655 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7656 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7657 cmd->port = PORT_FIBRE;
7658 break;
7659
7660 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7661 cmd->port = PORT_TP;
7662 break;
7663
7664 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7665 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7666 bp->link_params.ext_phy_config);
7667 break;
7668
7669 default:
7670 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7671 bp->link_params.ext_phy_config);
7672 break;
7673 }
7674 } else
a2fbb9ea 7675 cmd->port = PORT_TP;
a2fbb9ea 7676
34f80b04 7677 cmd->phy_address = bp->port.phy_addr;
7678 cmd->transceiver = XCVR_INTERNAL;
7679
c18487ee 7680 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7681 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7682 else
a2fbb9ea 7683 cmd->autoneg = AUTONEG_DISABLE;
7684
7685 cmd->maxtxpkt = 0;
7686 cmd->maxrxpkt = 0;
7687
7688 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7689 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7690 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7691 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7692 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7693 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7694 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7695
7696 return 0;
7697}
7698
7699static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7700{
7701 struct bnx2x *bp = netdev_priv(dev);
7702 u32 advertising;
7703
7704 if (IS_E1HMF(bp))
7705 return 0;
7706
7707 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7708 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7709 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7710 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7711 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7712 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7713 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7714
a2fbb9ea 7715 if (cmd->autoneg == AUTONEG_ENABLE) {
7716 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7717 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7718 return -EINVAL;
f1410647 7719 }
7720
7721 /* advertise the requested speed and duplex if supported */
34f80b04 7722 cmd->advertising &= bp->port.supported;
a2fbb9ea 7723
7724 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7725 bp->link_params.req_duplex = DUPLEX_FULL;
7726 bp->port.advertising |= (ADVERTISED_Autoneg |
7727 cmd->advertising);
7728
7729 } else { /* forced speed */
7730 /* advertise the requested speed and duplex if supported */
7731 switch (cmd->speed) {
7732 case SPEED_10:
7733 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7734 if (!(bp->port.supported &
7735 SUPPORTED_10baseT_Full)) {
7736 DP(NETIF_MSG_LINK,
7737 "10M full not supported\n");
a2fbb9ea 7738 return -EINVAL;
f1410647 7739 }
7740
7741 advertising = (ADVERTISED_10baseT_Full |
7742 ADVERTISED_TP);
7743 } else {
34f80b04 7744 if (!(bp->port.supported &
7745 SUPPORTED_10baseT_Half)) {
7746 DP(NETIF_MSG_LINK,
7747 "10M half not supported\n");
a2fbb9ea 7748 return -EINVAL;
f1410647 7749 }
7750
7751 advertising = (ADVERTISED_10baseT_Half |
7752 ADVERTISED_TP);
7753 }
7754 break;
7755
7756 case SPEED_100:
7757 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7758 if (!(bp->port.supported &
7759 SUPPORTED_100baseT_Full)) {
7760 DP(NETIF_MSG_LINK,
7761 "100M full not supported\n");
a2fbb9ea 7762 return -EINVAL;
f1410647 7763 }
7764
7765 advertising = (ADVERTISED_100baseT_Full |
7766 ADVERTISED_TP);
7767 } else {
34f80b04 7768 if (!(bp->port.supported &
7769 SUPPORTED_100baseT_Half)) {
7770 DP(NETIF_MSG_LINK,
7771 "100M half not supported\n");
a2fbb9ea 7772 return -EINVAL;
f1410647 7773 }
7774
7775 advertising = (ADVERTISED_100baseT_Half |
7776 ADVERTISED_TP);
7777 }
7778 break;
7779
7780 case SPEED_1000:
7781 if (cmd->duplex != DUPLEX_FULL) {
7782 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7783 return -EINVAL;
f1410647 7784 }
a2fbb9ea 7785
34f80b04 7786 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7787 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7788 return -EINVAL;
f1410647 7789 }
7790
7791 advertising = (ADVERTISED_1000baseT_Full |
7792 ADVERTISED_TP);
7793 break;
7794
7795 case SPEED_2500:
7796 if (cmd->duplex != DUPLEX_FULL) {
7797 DP(NETIF_MSG_LINK,
7798 "2.5G half not supported\n");
a2fbb9ea 7799 return -EINVAL;
f1410647 7800 }
a2fbb9ea 7801
34f80b04 7802 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7803 DP(NETIF_MSG_LINK,
7804 "2.5G full not supported\n");
a2fbb9ea 7805 return -EINVAL;
f1410647 7806 }
a2fbb9ea 7807
f1410647 7808 advertising = (ADVERTISED_2500baseX_Full |
7809 ADVERTISED_TP);
7810 break;
7811
7812 case SPEED_10000:
7813 if (cmd->duplex != DUPLEX_FULL) {
7814 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7815 return -EINVAL;
f1410647 7816 }
a2fbb9ea 7817
34f80b04 7818 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7819 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7820 return -EINVAL;
f1410647 7821 }
7822
7823 advertising = (ADVERTISED_10000baseT_Full |
7824 ADVERTISED_FIBRE);
7825 break;
7826
7827 default:
f1410647 7828 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7829 return -EINVAL;
7830 }
7831
7832 bp->link_params.req_line_speed = cmd->speed;
7833 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7834 bp->port.advertising = advertising;
7835 }
7836
c18487ee 7837 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7838 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7839 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7840 bp->port.advertising);
a2fbb9ea 7841
34f80b04 7842 if (netif_running(dev)) {
bb2a0f7a 7843 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7844 bnx2x_link_set(bp);
7845 }
7846
7847 return 0;
7848}
7849
7850#define PHY_FW_VER_LEN 10
7851
7852static void bnx2x_get_drvinfo(struct net_device *dev,
7853 struct ethtool_drvinfo *info)
7854{
7855 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 7856 u8 phy_fw_ver[PHY_FW_VER_LEN];
7857
7858 strcpy(info->driver, DRV_MODULE_NAME);
7859 strcpy(info->version, DRV_MODULE_VERSION);
7860
7861 phy_fw_ver[0] = '\0';
34f80b04 7862 if (bp->port.pmf) {
4a37fb66 7863 bnx2x_acquire_phy_lock(bp);
7864 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7865 (bp->state != BNX2X_STATE_CLOSED),
7866 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 7867 bnx2x_release_phy_lock(bp);
34f80b04 7868 }
c18487ee 7869
7870 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7871 (bp->common.bc_ver & 0xff0000) >> 16,
7872 (bp->common.bc_ver & 0xff00) >> 8,
7873 (bp->common.bc_ver & 0xff),
7874 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7875 strcpy(info->bus_info, pci_name(bp->pdev));
7876 info->n_stats = BNX2X_NUM_STATS;
7877 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7878 info->eedump_len = bp->common.flash_size;
7879 info->regdump_len = 0;
7880}
7881
7882static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7883{
7884 struct bnx2x *bp = netdev_priv(dev);
7885
7886 if (bp->flags & NO_WOL_FLAG) {
7887 wol->supported = 0;
7888 wol->wolopts = 0;
7889 } else {
7890 wol->supported = WAKE_MAGIC;
7891 if (bp->wol)
7892 wol->wolopts = WAKE_MAGIC;
7893 else
7894 wol->wolopts = 0;
7895 }
7896 memset(&wol->sopass, 0, sizeof(wol->sopass));
7897}
7898
7899static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7900{
7901 struct bnx2x *bp = netdev_priv(dev);
7902
7903 if (wol->wolopts & ~WAKE_MAGIC)
7904 return -EINVAL;
7905
7906 if (wol->wolopts & WAKE_MAGIC) {
7907 if (bp->flags & NO_WOL_FLAG)
7908 return -EINVAL;
7909
7910 bp->wol = 1;
34f80b04 7911 } else
a2fbb9ea 7912 bp->wol = 0;
34f80b04 7913
7914 return 0;
7915}
7916
7917static u32 bnx2x_get_msglevel(struct net_device *dev)
7918{
7919 struct bnx2x *bp = netdev_priv(dev);
7920
7921 return bp->msglevel;
7922}
7923
7924static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7925{
7926 struct bnx2x *bp = netdev_priv(dev);
7927
7928 if (capable(CAP_NET_ADMIN))
7929 bp->msglevel = level;
7930}
7931
7932static int bnx2x_nway_reset(struct net_device *dev)
7933{
7934 struct bnx2x *bp = netdev_priv(dev);
7935
7936 if (!bp->port.pmf)
7937 return 0;
a2fbb9ea 7938
34f80b04 7939 if (netif_running(dev)) {
bb2a0f7a 7940 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7941 bnx2x_link_set(bp);
7942 }
7943
7944 return 0;
7945}
7946
7947static int bnx2x_get_eeprom_len(struct net_device *dev)
7948{
7949 struct bnx2x *bp = netdev_priv(dev);
7950
34f80b04 7951 return bp->common.flash_size;
7952}
7953
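/* NVRAM access is arbitrated per port in software: set the ARB
 * request bit and poll for the matching grant bit, with the timeout
 * stretched 100x on slow (emulation/FPGA) parts; the release path
 * mirrors this with the request-clear bit. */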
7954static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7955{
34f80b04 7956 int port = BP_PORT(bp);
7957 int count, i;
7958 u32 val = 0;
7959
7960 /* adjust timeout for emulation/FPGA */
7961 count = NVRAM_TIMEOUT_COUNT;
7962 if (CHIP_REV_IS_SLOW(bp))
7963 count *= 100;
7964
7965 /* request access to nvram interface */
7966 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7967 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7968
7969 for (i = 0; i < count*10; i++) {
7970 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7971 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7972 break;
7973
7974 udelay(5);
7975 }
7976
7977 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7978 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7979 return -EBUSY;
7980 }
7981
7982 return 0;
7983}
7984
7985static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7986{
34f80b04 7987 int port = BP_PORT(bp);
7988 int count, i;
7989 u32 val = 0;
7990
7991 /* adjust timeout for emulation/FPGA */
7992 count = NVRAM_TIMEOUT_COUNT;
7993 if (CHIP_REV_IS_SLOW(bp))
7994 count *= 100;
7995
7996 /* relinquish nvram interface */
7997 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7998 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7999
8000 for (i = 0; i < count*10; i++) {
8001 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8002 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8003 break;
8004
8005 udelay(5);
8006 }
8007
8008 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8009 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8010 return -EBUSY;
8011 }
8012
8013 return 0;
8014}
8015
8016static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8017{
8018 u32 val;
8019
8020 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8021
8022 /* enable both bits, even on read */
8023 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8024 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8025 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8026}
8027
8028static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8029{
8030 u32 val;
8031
8032 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8033
8034 /* disable both bits, even after read */
8035 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8036 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8037 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8038}
8039
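/* One dword at a time: clear any stale DONE bit, latch the address,
 * issue DOIT together with the caller's FIRST/LAST flags, then poll
 * for DONE. The result is converted to big-endian because ethtool
 * treats the buffer as a byte array. */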
8040static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8041 u32 cmd_flags)
8042{
f1410647 8043 int count, i, rc;
8044 u32 val;
8045
8046 /* build the command word */
8047 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8048
8049 /* need to clear DONE bit separately */
8050 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8051
8052 /* address of the NVRAM to read from */
8053 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8054 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8055
8056 /* issue a read command */
8057 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8058
8059 /* adjust timeout for emulation/FPGA */
8060 count = NVRAM_TIMEOUT_COUNT;
8061 if (CHIP_REV_IS_SLOW(bp))
8062 count *= 100;
8063
8064 /* wait for completion */
8065 *ret_val = 0;
8066 rc = -EBUSY;
8067 for (i = 0; i < count; i++) {
8068 udelay(5);
8069 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8070
8071 if (val & MCPR_NVM_COMMAND_DONE) {
8072 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8073 /* we read nvram data in cpu order
8074 * but ethtool sees it as an array of bytes;
8075 * converting to big-endian will do the work */
8076 val = cpu_to_be32(val);
8077 *ret_val = val;
8078 rc = 0;
8079 break;
8080 }
8081 }
8082
8083 return rc;
8084}
8085
8086static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8087 int buf_size)
8088{
8089 int rc;
8090 u32 cmd_flags;
8091 u32 val;
8092
8093 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8094 DP(BNX2X_MSG_NVM,
c14423fe 8095 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8096 offset, buf_size);
8097 return -EINVAL;
8098 }
8099
8100 if (offset + buf_size > bp->common.flash_size) {
8101 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8102 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8103 offset, buf_size, bp->common.flash_size);
8104 return -EINVAL;
8105 }
8106
8107 /* request access to nvram interface */
8108 rc = bnx2x_acquire_nvram_lock(bp);
8109 if (rc)
8110 return rc;
8111
8112 /* enable access to nvram interface */
8113 bnx2x_enable_nvram_access(bp);
8114
8115 /* read the first word(s) */
8116 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8117 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8118 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8119 memcpy(ret_buf, &val, 4);
8120
8121 /* advance to the next dword */
8122 offset += sizeof(u32);
8123 ret_buf += sizeof(u32);
8124 buf_size -= sizeof(u32);
8125 cmd_flags = 0;
8126 }
8127
8128 if (rc == 0) {
8129 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8130 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8131 memcpy(ret_buf, &val, 4);
8132 }
8133
8134 /* disable access to nvram interface */
8135 bnx2x_disable_nvram_access(bp);
8136 bnx2x_release_nvram_lock(bp);
8137
8138 return rc;
8139}
8140
8141static int bnx2x_get_eeprom(struct net_device *dev,
8142 struct ethtool_eeprom *eeprom, u8 *eebuf)
8143{
8144 struct bnx2x *bp = netdev_priv(dev);
8145 int rc;
8146
8147 if (!netif_running(dev))
8148 return -EAGAIN;
8149
34f80b04 8150 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8151 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8152 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8153 eeprom->len, eeprom->len);
8154
8155 /* parameters already validated in ethtool_get_eeprom */
8156
8157 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8158
8159 return rc;
8160}
8161
8162static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8163 u32 cmd_flags)
8164{
f1410647 8165 int count, i, rc;
8166
8167 /* build the command word */
8168 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8169
8170 /* need to clear DONE bit separately */
8171 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8172
8173 /* write the data */
8174 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8175
8176 /* address of the NVRAM to write to */
8177 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8178 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8179
8180 /* issue the write command */
8181 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8182
8183 /* adjust timeout for emulation/FPGA */
8184 count = NVRAM_TIMEOUT_COUNT;
8185 if (CHIP_REV_IS_SLOW(bp))
8186 count *= 100;
8187
8188 /* wait for completion */
8189 rc = -EBUSY;
8190 for (i = 0; i < count; i++) {
8191 udelay(5);
8192 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8193 if (val & MCPR_NVM_COMMAND_DONE) {
8194 rc = 0;
8195 break;
8196 }
8197 }
8198
8199 return rc;
8200}
8201
f1410647 8202#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8203
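/* A single-byte write is a read-modify-write of the aligned dword
 * containing the byte; BYTE_OFFSET yields the bit position of that
 * byte within the dword. */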
8204static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8205 int buf_size)
8206{
8207 int rc;
8208 u32 cmd_flags;
8209 u32 align_offset;
8210 u32 val;
8211
8212 if (offset + buf_size > bp->common.flash_size) {
8213 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8214 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8215 offset, buf_size, bp->common.flash_size);
8216 return -EINVAL;
8217 }
8218
8219 /* request access to nvram interface */
8220 rc = bnx2x_acquire_nvram_lock(bp);
8221 if (rc)
8222 return rc;
8223
8224 /* enable access to nvram interface */
8225 bnx2x_enable_nvram_access(bp);
8226
8227 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8228 align_offset = (offset & ~0x03);
8229 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8230
8231 if (rc == 0) {
8232 val &= ~(0xff << BYTE_OFFSET(offset));
8233 val |= (*data_buf << BYTE_OFFSET(offset));
8234
8235 /* nvram data is returned as an array of bytes
8236 * convert it back to cpu order */
8237 val = be32_to_cpu(val);
8238
8239 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8240 cmd_flags);
8241 }
8242
8243 /* disable access to nvram interface */
8244 bnx2x_disable_nvram_access(bp);
8245 bnx2x_release_nvram_lock(bp);
8246
8247 return rc;
8248}
8249
8250static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8251 int buf_size)
8252{
8253 int rc;
8254 u32 cmd_flags;
8255 u32 val;
8256 u32 written_so_far;
8257
34f80b04 8258 if (buf_size == 1) /* ethtool */
a2fbb9ea 8259 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8260
8261 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8262 DP(BNX2X_MSG_NVM,
c14423fe 8263 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8264 offset, buf_size);
8265 return -EINVAL;
8266 }
8267
8268 if (offset + buf_size > bp->common.flash_size) {
8269 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8270 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8271 offset, buf_size, bp->common.flash_size);
8272 return -EINVAL;
8273 }
8274
8275 /* request access to nvram interface */
8276 rc = bnx2x_acquire_nvram_lock(bp);
8277 if (rc)
8278 return rc;
8279
8280 /* enable access to nvram interface */
8281 bnx2x_enable_nvram_access(bp);
8282
8283 written_so_far = 0;
8284 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8285 while ((written_so_far < buf_size) && (rc == 0)) {
8286 if (written_so_far == (buf_size - sizeof(u32)))
8287 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8288 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8289 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8290 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8291 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8292
8293 memcpy(&val, data_buf, 4);
8294
8295 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8296
8297 /* advance to the next dword */
8298 offset += sizeof(u32);
8299 data_buf += sizeof(u32);
8300 written_so_far += sizeof(u32);
8301 cmd_flags = 0;
8302 }
8303
8304 /* disable access to nvram interface */
8305 bnx2x_disable_nvram_access(bp);
8306 bnx2x_release_nvram_lock(bp);
8307
8308 return rc;
8309}
8310
8311static int bnx2x_set_eeprom(struct net_device *dev,
8312 struct ethtool_eeprom *eeprom, u8 *eebuf)
8313{
8314 struct bnx2x *bp = netdev_priv(dev);
8315 int rc;
8316
8317 if (!netif_running(dev))
8318 return -EAGAIN;
8319
34f80b04 8320 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8321 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8322 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8323 eeprom->len, eeprom->len);
8324
8325 /* parameters already validated in ethtool_set_eeprom */
8326
c18487ee 8327 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8328 if (eeprom->magic == 0x00504859)
8329 if (bp->port.pmf) {
8330
4a37fb66 8331 bnx2x_acquire_phy_lock(bp);
8332 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8333 bp->link_params.ext_phy_config,
8334 (bp->state != BNX2X_STATE_CLOSED),
8335 eebuf, eeprom->len);
8336 if ((bp->state == BNX2X_STATE_OPEN) ||
8337 (bp->state == BNX2X_STATE_DISABLED)) {
8338 rc |= bnx2x_link_reset(&bp->link_params,
8339 &bp->link_vars);
8340 rc |= bnx2x_phy_init(&bp->link_params,
8341 &bp->link_vars);
bb2a0f7a 8342 }
4a37fb66 8343 bnx2x_release_phy_lock(bp);
8344
8345 } else /* Only the PMF can access the PHY */
8346 return -EINVAL;
8347 else
c18487ee 8348 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8349
8350 return rc;
8351}
8352
8353static int bnx2x_get_coalesce(struct net_device *dev,
8354 struct ethtool_coalesce *coal)
8355{
8356 struct bnx2x *bp = netdev_priv(dev);
8357
8358 memset(coal, 0, sizeof(struct ethtool_coalesce));
8359
8360 coal->rx_coalesce_usecs = bp->rx_ticks;
8361 coal->tx_coalesce_usecs = bp->tx_ticks;
8362
8363 return 0;
8364}
8365
8366static int bnx2x_set_coalesce(struct net_device *dev,
8367 struct ethtool_coalesce *coal)
8368{
8369 struct bnx2x *bp = netdev_priv(dev);
8370
8371 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8372 if (bp->rx_ticks > 3000)
8373 bp->rx_ticks = 3000;
8374
8375 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8376 if (bp->tx_ticks > 0x3000)
8377 bp->tx_ticks = 0x3000;
8378
34f80b04 8379 if (netif_running(dev))
8380 bnx2x_update_coalesce(bp);
8381
8382 return 0;
8383}
8384
8385static void bnx2x_get_ringparam(struct net_device *dev,
8386 struct ethtool_ringparam *ering)
8387{
8388 struct bnx2x *bp = netdev_priv(dev);
8389
8390 ering->rx_max_pending = MAX_RX_AVAIL;
8391 ering->rx_mini_max_pending = 0;
8392 ering->rx_jumbo_max_pending = 0;
8393
8394 ering->rx_pending = bp->rx_ring_size;
8395 ering->rx_mini_pending = 0;
8396 ering->rx_jumbo_pending = 0;
8397
8398 ering->tx_max_pending = MAX_TX_AVAIL;
8399 ering->tx_pending = bp->tx_ring_size;
8400}
8401
8402static int bnx2x_set_ringparam(struct net_device *dev,
8403 struct ethtool_ringparam *ering)
8404{
8405 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8406 int rc = 0;
8407
8408 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8409 (ering->tx_pending > MAX_TX_AVAIL) ||
8410 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8411 return -EINVAL;
8412
8413 bp->rx_ring_size = ering->rx_pending;
8414 bp->tx_ring_size = ering->tx_pending;
8415
8416 if (netif_running(dev)) {
8417 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8418 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8419 }
8420
34f80b04 8421 return rc;
8422}
8423
8424static void bnx2x_get_pauseparam(struct net_device *dev,
8425 struct ethtool_pauseparam *epause)
8426{
8427 struct bnx2x *bp = netdev_priv(dev);
8428
c0700f90 8429 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8430 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8431
8432 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8433 BNX2X_FLOW_CTRL_RX);
8434 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8435 BNX2X_FLOW_CTRL_TX);
8436
8437 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8438 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8439 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8440}
8441
8442static int bnx2x_set_pauseparam(struct net_device *dev,
8443 struct ethtool_pauseparam *epause)
8444{
8445 struct bnx2x *bp = netdev_priv(dev);
8446
8447 if (IS_E1HMF(bp))
8448 return 0;
8449
8450 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8451 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8452 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8453
c0700f90 8454 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8455
f1410647 8456 if (epause->rx_pause)
c0700f90 8457 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8458
f1410647 8459 if (epause->tx_pause)
c0700f90 8460 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8461
8462 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8463 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8464
c18487ee 8465 if (epause->autoneg) {
34f80b04 8466 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8467 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8468 return -EINVAL;
8469 }
a2fbb9ea 8470
c18487ee 8471 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8472 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8473 }
a2fbb9ea 8474
8475 DP(NETIF_MSG_LINK,
8476 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8477
8478 if (netif_running(dev)) {
bb2a0f7a 8479 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8480 bnx2x_link_set(bp);
8481 }
8482
8483 return 0;
8484}
8485
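/* LRO and the TPA driver flag are toggled together, and since TPA
 * changes how the rx rings are built, flipping it on a running
 * interface forces a full unload/reload cycle. */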
8486static int bnx2x_set_flags(struct net_device *dev, u32 data)
8487{
8488 struct bnx2x *bp = netdev_priv(dev);
8489 int changed = 0;
8490 int rc = 0;
8491
8492 /* TPA requires Rx CSUM offloading */
8493 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8494 if (!(dev->features & NETIF_F_LRO)) {
8495 dev->features |= NETIF_F_LRO;
8496 bp->flags |= TPA_ENABLE_FLAG;
8497 changed = 1;
8498 }
8499
8500 } else if (dev->features & NETIF_F_LRO) {
8501 dev->features &= ~NETIF_F_LRO;
8502 bp->flags &= ~TPA_ENABLE_FLAG;
8503 changed = 1;
8504 }
8505
8506 if (changed && netif_running(dev)) {
8507 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8508 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8509 }
8510
8511 return rc;
8512}
8513
8514static u32 bnx2x_get_rx_csum(struct net_device *dev)
8515{
8516 struct bnx2x *bp = netdev_priv(dev);
8517
8518 return bp->rx_csum;
8519}
8520
8521static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8522{
8523 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8524 int rc = 0;
8525
8526 bp->rx_csum = data;
8527
8528 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8529 TPA'ed packets will be discarded due to wrong TCP CSUM */
8530 if (!data) {
8531 u32 flags = ethtool_op_get_flags(dev);
8532
8533 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8534 }
8535
8536 return rc;
8537}
8538
8539static int bnx2x_set_tso(struct net_device *dev, u32 data)
8540{
755735eb 8541 if (data) {
a2fbb9ea 8542 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8543 dev->features |= NETIF_F_TSO6;
8544 } else {
a2fbb9ea 8545 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8546 dev->features &= ~NETIF_F_TSO6;
8547 }
8548
8549 return 0;
8550}
8551
f3c87cdd 8552static const struct {
8553 char string[ETH_GSTRING_LEN];
8554} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8555 { "register_test (offline)" },
8556 { "memory_test (offline)" },
8557 { "loopback_test (offline)" },
8558 { "nvram_test (online)" },
8559 { "interrupt_test (online)" },
8560 { "link_test (online)" },
8561 { "idle check (online)" },
8562 { "MC errors (online)" }
8563};
8564
8565static int bnx2x_self_test_count(struct net_device *dev)
8566{
8567 return BNX2X_NUM_TESTS;
8568}
8569
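/* Offline register test. Each reg_tbl entry holds the port-0 offset
 * (offset0), the per-port stride (offset1, so the register tested is
 * offset0 + port*offset1) and a mask of the writable bits. Every
 * register is written with 0x00000000 and then 0xffffffff, read back
 * and compared under the mask, and finally restored.
 */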
8570static int bnx2x_test_registers(struct bnx2x *bp)
8571{
8572 int idx, i, rc = -ENODEV;
8573 u32 wr_val = 0;
9dabc424 8574 int port = BP_PORT(bp);
8575 static const struct {
8576 u32 offset0;
8577 u32 offset1;
8578 u32 mask;
8579 } reg_tbl[] = {
8580/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8581 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8582 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8583 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8584 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8585 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8586 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8587 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8588 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8589 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8590/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8591 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8592 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8593 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8594 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8595 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8596 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8597 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8598 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8599 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8600/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8601 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8602 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8603 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8604 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8605 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8606 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8607 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8608 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8609 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8610/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8611 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8612 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8613 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8614 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8615 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8616 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8617 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8618
8619 { 0xffffffff, 0, 0x00000000 }
8620 };
8621
8622 if (!netif_running(bp->dev))
8623 return rc;
8624
 8625 	/* Run the test twice:
 8626 	   first by writing 0x00000000, then by writing 0xffffffff */
8627 for (idx = 0; idx < 2; idx++) {
8628
8629 switch (idx) {
8630 case 0:
8631 wr_val = 0;
8632 break;
8633 case 1:
8634 wr_val = 0xffffffff;
8635 break;
8636 }
8637
8638 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8639 u32 offset, mask, save_val, val;
8640
8641 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8642 mask = reg_tbl[i].mask;
8643
8644 save_val = REG_RD(bp, offset);
8645
8646 REG_WR(bp, offset, wr_val);
8647 val = REG_RD(bp, offset);
8648
8649 /* Restore the original register's value */
8650 REG_WR(bp, offset, save_val);
8651
 8652 			/* verify that the value is as expected */
8653 if ((val & mask) != (wr_val & mask))
8654 goto test_reg_exit;
8655 }
8656 }
8657
8658 rc = 0;
8659
8660test_reg_exit:
8661 return rc;
8662}
8663
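/* Offline memory test: read back every word of the listed internal
 * memories, then check the per-block parity status registers. The
 * e1_mask/e1h_mask columns list parity bits that are expected to be
 * set on E1/E1H respectively and are therefore ignored.
 */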
8664static int bnx2x_test_memory(struct bnx2x *bp)
8665{
8666 int i, j, rc = -ENODEV;
8667 u32 val;
8668 static const struct {
8669 u32 offset;
8670 int size;
8671 } mem_tbl[] = {
8672 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8673 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8674 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8675 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8676 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8677 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8678 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8679
8680 { 0xffffffff, 0 }
8681 };
8682 static const struct {
8683 char *name;
8684 u32 offset;
8685 u32 e1_mask;
8686 u32 e1h_mask;
f3c87cdd 8687 } prty_tbl[] = {
8688 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8689 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8690 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8691 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8692 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8693 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8694
8695 { NULL, 0xffffffff, 0, 0 }
8696 };
8697
8698 if (!netif_running(bp->dev))
8699 return rc;
8700
8701 /* Go through all the memories */
8702 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8703 for (j = 0; j < mem_tbl[i].size; j++)
8704 REG_RD(bp, mem_tbl[i].offset + j*4);
8705
8706 /* Check the parity status */
8707 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8708 val = REG_RD(bp, prty_tbl[i].offset);
8709 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8710 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8711 DP(NETIF_MSG_HW,
8712 "%s is 0x%x\n", prty_tbl[i].name, val);
8713 goto test_mem_exit;
8714 }
8715 }
8716
8717 rc = 0;
8718
8719test_mem_exit:
8720 return rc;
8721}
8722
8723static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8724{
8725 int cnt = 1000;
8726
8727 if (link_up)
8728 while (bnx2x_link_test(bp) && cnt--)
8729 msleep(10);
8730}
8731
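/* Single-packet loopback: bring the link up in BMAC or XGXS loopback
 * mode, transmit one 1514-byte frame on queue 0, then verify that
 * exactly one Tx completion and one Rx completion arrived and that the
 * received payload matches byte for byte.
 */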
8732static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8733{
8734 unsigned int pkt_size, num_pkts, i;
8735 struct sk_buff *skb;
8736 unsigned char *packet;
8737 struct bnx2x_fastpath *fp = &bp->fp[0];
8738 u16 tx_start_idx, tx_idx;
8739 u16 rx_start_idx, rx_idx;
8740 u16 pkt_prod;
8741 struct sw_tx_bd *tx_buf;
8742 struct eth_tx_bd *tx_bd;
8743 dma_addr_t mapping;
8744 union eth_rx_cqe *cqe;
8745 u8 cqe_fp_flags;
8746 struct sw_rx_bd *rx_buf;
8747 u16 len;
8748 int rc = -ENODEV;
8749
8750 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8751 bp->link_params.loopback_mode = LOOPBACK_BMAC;
4a37fb66 8752 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8753 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8754 bnx2x_release_phy_lock(bp);
8755
8756 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8757 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
4a37fb66 8758 bnx2x_acquire_phy_lock(bp);
f3c87cdd 8759 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 8760 bnx2x_release_phy_lock(bp);
8761 /* wait until link state is restored */
8762 bnx2x_wait_for_link(bp, link_up);
8763
8764 } else
8765 return -EINVAL;
8766
8767 pkt_size = 1514;
8768 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8769 if (!skb) {
8770 rc = -ENOMEM;
8771 goto test_loopback_exit;
8772 }
8773 packet = skb_put(skb, pkt_size);
8774 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8775 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8776 for (i = ETH_HLEN; i < pkt_size; i++)
8777 packet[i] = (unsigned char) (i & 0xff);
8778
8779 num_pkts = 0;
8780 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8781 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8782
8783 pkt_prod = fp->tx_pkt_prod++;
8784 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8785 tx_buf->first_bd = fp->tx_bd_prod;
8786 tx_buf->skb = skb;
8787
8788 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8789 mapping = pci_map_single(bp->pdev, skb->data,
8790 skb_headlen(skb), PCI_DMA_TODEVICE);
8791 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8792 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8793 tx_bd->nbd = cpu_to_le16(1);
8794 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8795 tx_bd->vlan = cpu_to_le16(pkt_prod);
8796 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8797 ETH_TX_BD_FLAGS_END_BD);
8798 tx_bd->general_data = ((UNICAST_ADDRESS <<
8799 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8800
8801 wmb();
8802
8803 fp->hw_tx_prods->bds_prod =
8804 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8805 mb(); /* FW restriction: must not reorder writing nbd and packets */
8806 fp->hw_tx_prods->packets_prod =
8807 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8808 DOORBELL(bp, FP_IDX(fp), 0);
8809
8810 mmiowb();
8811
8812 num_pkts++;
8813 fp->tx_bd_prod++;
8814 bp->dev->trans_start = jiffies;
8815
8816 udelay(100);
8817
8818 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8819 if (tx_idx != tx_start_idx + num_pkts)
8820 goto test_loopback_exit;
8821
8822 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8823 if (rx_idx != rx_start_idx + num_pkts)
8824 goto test_loopback_exit;
8825
8826 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8827 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8828 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8829 goto test_loopback_rx_exit;
8830
8831 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8832 if (len != pkt_size)
8833 goto test_loopback_rx_exit;
8834
8835 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8836 skb = rx_buf->skb;
8837 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8838 for (i = ETH_HLEN; i < pkt_size; i++)
8839 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8840 goto test_loopback_rx_exit;
8841
8842 rc = 0;
8843
8844test_loopback_rx_exit:
8845
8846 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8847 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8848 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8849 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8850
8851 /* Update producers */
8852 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8853 fp->rx_sge_prod);
8854
8855test_loopback_exit:
8856 bp->link_params.loopback_mode = LOOPBACK_NONE;
8857
8858 return rc;
8859}
8860
8861static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8862{
8863 int rc = 0;
8864
8865 if (!netif_running(bp->dev))
8866 return BNX2X_LOOPBACK_FAILED;
8867
f8ef6e44 8868 bnx2x_netif_stop(bp, 1);
8869
8870 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8871 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8872 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8873 }
8874
8875 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8876 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8877 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8878 }
8879
8880 bnx2x_netif_start(bp);
8881
8882 return rc;
8883}
8884
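/* Standard CRC32 residual: computing the CRC over a block together
 * with its appended (inverted) CRC yields this constant when the block
 * is intact, so each NVRAM region below is read including its trailing
 * CRC and checked against the residual.
 */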
8885#define CRC32_RESIDUAL 0xdebb20e3
8886
8887static int bnx2x_test_nvram(struct bnx2x *bp)
8888{
8889 static const struct {
8890 int offset;
8891 int size;
8892 } nvram_tbl[] = {
8893 { 0, 0x14 }, /* bootstrap */
8894 { 0x14, 0xec }, /* dir */
8895 { 0x100, 0x350 }, /* manuf_info */
8896 { 0x450, 0xf0 }, /* feature_info */
8897 { 0x640, 0x64 }, /* upgrade_key_info */
8898 { 0x6a4, 0x64 },
8899 { 0x708, 0x70 }, /* manuf_key_info */
8900 { 0x778, 0x70 },
8901 { 0, 0 }
8902 };
8903 u32 buf[0x350 / 4];
8904 u8 *data = (u8 *)buf;
8905 int i, rc;
8906 u32 magic, csum;
8907
8908 rc = bnx2x_nvram_read(bp, 0, data, 4);
8909 if (rc) {
8910 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
8911 goto test_nvram_exit;
8912 }
8913
8914 magic = be32_to_cpu(buf[0]);
8915 if (magic != 0x669955aa) {
8916 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
8917 rc = -ENODEV;
8918 goto test_nvram_exit;
8919 }
8920
8921 for (i = 0; nvram_tbl[i].size; i++) {
8922
8923 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
8924 nvram_tbl[i].size);
8925 if (rc) {
8926 DP(NETIF_MSG_PROBE,
8927 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
8928 goto test_nvram_exit;
8929 }
8930
8931 csum = ether_crc_le(nvram_tbl[i].size, data);
8932 if (csum != CRC32_RESIDUAL) {
8933 DP(NETIF_MSG_PROBE,
8934 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
8935 rc = -ENODEV;
8936 goto test_nvram_exit;
8937 }
8938 }
8939
8940test_nvram_exit:
8941 return rc;
8942}
8943
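/* Interrupt test: post a benign SET_MAC ramrod on the slowpath queue
 * and poll set_mac_pending, which the slowpath completion (interrupt)
 * path clears. If it is still set after ~100ms, interrupt delivery is
 * considered broken and -ENODEV is returned.
 */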
8944static int bnx2x_test_intr(struct bnx2x *bp)
8945{
8946 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
8947 int i, rc;
8948
8949 if (!netif_running(bp->dev))
8950 return -ENODEV;
8951
8952 config->hdr.length_6b = 0;
8953 if (CHIP_IS_E1(bp))
8954 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
8955 else
8956 config->hdr.offset = BP_FUNC(bp);
8957 config->hdr.client_id = BP_CL_ID(bp);
8958 config->hdr.reserved1 = 0;
8959
8960 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8961 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
8962 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
8963 if (rc == 0) {
8964 bp->set_mac_pending++;
8965 for (i = 0; i < 10; i++) {
8966 if (!bp->set_mac_pending)
8967 break;
8968 msleep_interruptible(10);
8969 }
8970 if (i == 10)
8971 rc = -ENODEV;
8972 }
8973
8974 return rc;
8975}
8976
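/* ethtool self-test entry. buf[] slots follow bnx2x_tests_str_arr:
 * 0-2 are the offline register/memory/loopback tests, which reload the
 * NIC in LOAD_DIAG mode and are skipped in E1H multi-function mode;
 * 3-5 are the online nvram/interrupt/link tests and 7 reports
 * microcode asserts.
 */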
8977static void bnx2x_self_test(struct net_device *dev,
8978 struct ethtool_test *etest, u64 *buf)
8979{
8980 struct bnx2x *bp = netdev_priv(dev);
8981
8982 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8983
f3c87cdd 8984 if (!netif_running(dev))
a2fbb9ea 8985 return;
a2fbb9ea 8986
33471629 8987 /* offline tests are not supported in MF mode */
8988 if (IS_E1HMF(bp))
8989 etest->flags &= ~ETH_TEST_FL_OFFLINE;
8990
8991 if (etest->flags & ETH_TEST_FL_OFFLINE) {
8992 u8 link_up;
8993
8994 link_up = bp->link_vars.link_up;
8995 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8996 bnx2x_nic_load(bp, LOAD_DIAG);
8997 /* wait until link state is restored */
8998 bnx2x_wait_for_link(bp, link_up);
8999
9000 if (bnx2x_test_registers(bp) != 0) {
9001 buf[0] = 1;
9002 etest->flags |= ETH_TEST_FL_FAILED;
9003 }
9004 if (bnx2x_test_memory(bp) != 0) {
9005 buf[1] = 1;
9006 etest->flags |= ETH_TEST_FL_FAILED;
9007 }
9008 buf[2] = bnx2x_test_loopback(bp, link_up);
9009 if (buf[2] != 0)
9010 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9011
9012 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9013 bnx2x_nic_load(bp, LOAD_NORMAL);
9014 /* wait until link state is restored */
9015 bnx2x_wait_for_link(bp, link_up);
9016 }
9017 if (bnx2x_test_nvram(bp) != 0) {
9018 buf[3] = 1;
9019 etest->flags |= ETH_TEST_FL_FAILED;
9020 }
9021 if (bnx2x_test_intr(bp) != 0) {
9022 buf[4] = 1;
9023 etest->flags |= ETH_TEST_FL_FAILED;
9024 }
9025 if (bp->port.pmf)
9026 if (bnx2x_link_test(bp) != 0) {
9027 buf[5] = 1;
9028 etest->flags |= ETH_TEST_FL_FAILED;
9029 }
9030 buf[7] = bnx2x_mc_assert(bp);
9031 if (buf[7] != 0)
9032 etest->flags |= ETH_TEST_FL_FAILED;
9033
9034#ifdef BNX2X_EXTRA_DEBUG
9035 bnx2x_panic_dump(bp);
9036#endif
9037}
9038
9039static const struct {
9040 long offset;
9041 int size;
9042 u32 flags;
9043#define STATS_FLAGS_PORT 1
9044#define STATS_FLAGS_FUNC 2
9045 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9046} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9047/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9048 8, STATS_FLAGS_FUNC, "rx_bytes" },
9049 { STATS_OFFSET32(error_bytes_received_hi),
9050 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9051 { STATS_OFFSET32(total_bytes_transmitted_hi),
9052 8, STATS_FLAGS_FUNC, "tx_bytes" },
9053 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9054 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 9055 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 9056 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 9057 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 9058 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 9059 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 9060 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 9061 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 9062 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 9063 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 9064 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 9065/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 9066 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9067 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9068 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9069 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9070 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 9071 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9072 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9073 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9074 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 9075 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9076 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9077 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9078 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9079 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9080 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9081 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9082 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9083 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9084 8, STATS_FLAGS_PORT, "rx_fragments" },
9085/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9086 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9087 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9088 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9089 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9090 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9091 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9092 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9093 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9094 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9095 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9096 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9097 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9098 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9099 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9100 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9101 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9102 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9103 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9104 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9105/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9106 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9107 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9108 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9109 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9110 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9111 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9112 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9113 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9114 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9115 { STATS_OFFSET32(mac_filter_discard),
9116 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9117 { STATS_OFFSET32(no_buff_discard),
9118 4, STATS_FLAGS_FUNC, "rx_discards" },
9119 { STATS_OFFSET32(xxoverflow_discard),
9120 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9121 { STATS_OFFSET32(brb_drop_hi),
9122 8, STATS_FLAGS_PORT, "brb_discard" },
9123 { STATS_OFFSET32(brb_truncate_hi),
9124 8, STATS_FLAGS_PORT, "brb_truncate" },
9125/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9126 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9127 { STATS_OFFSET32(rx_skb_alloc_failed),
9128 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9129/* 42 */{ STATS_OFFSET32(hw_csum_err),
9130 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9131};
9132
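/* In E1H multi-function mode the port (MAC-level) statistics are
 * shared between functions, so entries flagged STATS_FLAGS_PORT are
 * filtered out of the strings, the count and the values via this
 * predicate.
 */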
9133#define IS_NOT_E1HMF_STAT(bp, i) \
9134 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9135
9136static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9137{
9138 struct bnx2x *bp = netdev_priv(dev);
9139 int i, j;
9140
9141 switch (stringset) {
9142 case ETH_SS_STATS:
bb2a0f7a 9143 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9144 if (IS_NOT_E1HMF_STAT(bp, i))
9145 continue;
9146 strcpy(buf + j*ETH_GSTRING_LEN,
9147 bnx2x_stats_arr[i].string);
9148 j++;
9149 }
9150 break;
9151
9152 case ETH_SS_TEST:
9153 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9154 break;
9155 }
9156}
9157
9158static int bnx2x_get_stats_count(struct net_device *dev)
9159{
9160 struct bnx2x *bp = netdev_priv(dev);
9161 int i, num_stats = 0;
9162
9163 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9164 if (IS_NOT_E1HMF_STAT(bp, i))
9165 continue;
9166 num_stats++;
9167 }
9168 return num_stats;
9169}
9170
9171static void bnx2x_get_ethtool_stats(struct net_device *dev,
9172 struct ethtool_stats *stats, u64 *buf)
9173{
9174 struct bnx2x *bp = netdev_priv(dev);
9175 u32 *hw_stats = (u32 *)&bp->eth_stats;
9176 int i, j;
a2fbb9ea 9177
bb2a0f7a 9178 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9179 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9180 continue;
9181
9182 if (bnx2x_stats_arr[i].size == 0) {
9183 /* skip this counter */
9184 buf[j] = 0;
9185 j++;
9186 continue;
9187 }
bb2a0f7a 9188 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9189 /* 4-byte counter */
9190 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9191 j++;
9192 continue;
9193 }
9194 /* 8-byte counter */
9195 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9196 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9197 j++;
9198 }
9199}
9200
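/* ethtool LED identify: blink the port LED for 'data' seconds
 * (default 2), alternating OPER and OFF modes every 500ms, then
 * restore the LED to reflect the current link state.
 */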
9201static int bnx2x_phys_id(struct net_device *dev, u32 data)
9202{
9203 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9204 int port = BP_PORT(bp);
9205 int i;
9206
9207 if (!netif_running(dev))
9208 return 0;
9209
9210 if (!bp->port.pmf)
9211 return 0;
9212
9213 if (data == 0)
9214 data = 2;
9215
9216 for (i = 0; i < (data * 2); i++) {
c18487ee 9217 if ((i % 2) == 0)
34f80b04 9218 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9219 bp->link_params.hw_led_mode,
9220 bp->link_params.chip_id);
9221 else
34f80b04 9222 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9223 bp->link_params.hw_led_mode,
9224 bp->link_params.chip_id);
9225
9226 msleep_interruptible(500);
9227 if (signal_pending(current))
9228 break;
9229 }
9230
c18487ee 9231 if (bp->link_vars.link_up)
34f80b04 9232 bnx2x_set_led(bp, port, LED_MODE_OPER,
9233 bp->link_vars.line_speed,
9234 bp->link_params.hw_led_mode,
9235 bp->link_params.chip_id);
9236
9237 return 0;
9238}
9239
9240static struct ethtool_ops bnx2x_ethtool_ops = {
9241 .get_settings = bnx2x_get_settings,
9242 .set_settings = bnx2x_set_settings,
9243 .get_drvinfo = bnx2x_get_drvinfo,
9244 .get_wol = bnx2x_get_wol,
9245 .set_wol = bnx2x_set_wol,
9246 .get_msglevel = bnx2x_get_msglevel,
9247 .set_msglevel = bnx2x_set_msglevel,
9248 .nway_reset = bnx2x_nway_reset,
9249 .get_link = ethtool_op_get_link,
9250 .get_eeprom_len = bnx2x_get_eeprom_len,
9251 .get_eeprom = bnx2x_get_eeprom,
9252 .set_eeprom = bnx2x_set_eeprom,
9253 .get_coalesce = bnx2x_get_coalesce,
9254 .set_coalesce = bnx2x_set_coalesce,
9255 .get_ringparam = bnx2x_get_ringparam,
9256 .set_ringparam = bnx2x_set_ringparam,
9257 .get_pauseparam = bnx2x_get_pauseparam,
9258 .set_pauseparam = bnx2x_set_pauseparam,
9259 .get_rx_csum = bnx2x_get_rx_csum,
9260 .set_rx_csum = bnx2x_set_rx_csum,
9261 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9262 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9263 .set_flags = bnx2x_set_flags,
9264 .get_flags = ethtool_op_get_flags,
9265 .get_sg = ethtool_op_get_sg,
9266 .set_sg = ethtool_op_set_sg,
9267 .get_tso = ethtool_op_get_tso,
9268 .set_tso = bnx2x_set_tso,
9269 .self_test_count = bnx2x_self_test_count,
9270 .self_test = bnx2x_self_test,
9271 .get_strings = bnx2x_get_strings,
9272 .phys_id = bnx2x_phys_id,
9273 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9274 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9275};
9276
9277/* end of ethtool_ops */
9278
9279/****************************************************************************
9280* General service functions
9281****************************************************************************/
9282
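/* Switch the chip between D0 and D3hot by programming PMCSR directly;
 * PME is enabled on the way down when WoL is configured. The device
 * must not be accessed again until it is brought back to D0.
 */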
9283static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9284{
9285 u16 pmcsr;
9286
9287 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9288
9289 switch (state) {
9290 case PCI_D0:
34f80b04 9291 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9292 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9293 PCI_PM_CTRL_PME_STATUS));
9294
9295 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9296 /* delay required during transition out of D3hot */
a2fbb9ea 9297 msleep(20);
34f80b04 9298 break;
a2fbb9ea 9299
9300 case PCI_D3hot:
9301 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9302 pmcsr |= 3;
a2fbb9ea 9303
9304 if (bp->wol)
9305 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9306
9307 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9308 pmcsr);
a2fbb9ea 9309
9310 /* No more memory access after this point until
9311 * device is brought back to D0.
9312 */
9313 break;
9314
9315 default:
9316 return -EINVAL;
9317 }
9318 return 0;
9319}
9320
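/* Rx work predicate: compare the status-block Rx consumer against our
 * rx_comp_cons. The index is bumped past page-boundary entries
 * (presumably the last RCQ slot of a page holds a next-page pointer
 * rather than a completion) so the two counters stay comparable.
 */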
9321static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9322{
9323 u16 rx_cons_sb;
9324
9325 /* Tell compiler that status block fields can change */
9326 barrier();
9327 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9328 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9329 rx_cons_sb++;
9330 return (fp->rx_comp_cons != rx_cons_sb);
9331}
9332
9333/*
9334 * net_device service functions
9335 */
9336
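/* NAPI poll: Tx completions are always reaped, Rx consumes up to
 * 'budget'. Polling completes, and the status-block interrupt is
 * re-enabled via IGU_INT_ENABLE, only when the budget was not
 * exhausted and BNX2X_HAS_WORK() reports nothing pending.
 */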
9337static int bnx2x_poll(struct napi_struct *napi, int budget)
9338{
9339 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9340 napi);
9341 struct bnx2x *bp = fp->bp;
9342 int work_done = 0;
9343
9344#ifdef BNX2X_STOP_ON_ERROR
9345 if (unlikely(bp->panic))
34f80b04 9346 goto poll_panic;
9347#endif
9348
9349 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9350 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9351 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9352
9353 bnx2x_update_fpsb_idx(fp);
9354
237907c1 9355 if (bnx2x_has_tx_work(fp))
9356 bnx2x_tx_int(fp, budget);
9357
237907c1 9358 if (bnx2x_has_rx_work(fp))
a2fbb9ea 9359 work_done = bnx2x_rx_int(fp, budget);
da5a662a 9360 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9361
9362 /* must not complete if we consumed full budget */
da5a662a 9363 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9364
9365#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9366poll_panic:
a2fbb9ea 9367#endif
908a7a16 9368 netif_rx_complete(napi);
a2fbb9ea 9369
34f80b04 9370 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9371 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9372 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9373 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9374 }
9375 return work_done;
9376}
9377
9378
9379/* we split the first BD into headers and data BDs
33471629 9380 * to ease the pain of our fellow microcode engineers
9381 * we use one mapping for both BDs
9382 * So far this has only been observed to happen
9383 * in Other Operating Systems(TM)
9384 */
9385static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9386 struct bnx2x_fastpath *fp,
9387 struct eth_tx_bd **tx_bd, u16 hlen,
9388 u16 bd_prod, int nbd)
9389{
9390 struct eth_tx_bd *h_tx_bd = *tx_bd;
9391 struct eth_tx_bd *d_tx_bd;
9392 dma_addr_t mapping;
9393 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9394
9395 /* first fix first BD */
9396 h_tx_bd->nbd = cpu_to_le16(nbd);
9397 h_tx_bd->nbytes = cpu_to_le16(hlen);
9398
9399 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9400 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9401 h_tx_bd->addr_lo, h_tx_bd->nbd);
9402
9403 /* now get a new data BD
9404 * (after the pbd) and fill it */
9405 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9406 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9407
9408 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9409 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9410
9411 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9412 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9413 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9414 d_tx_bd->vlan = 0;
9415 /* this marks the BD as one that has no individual mapping
9416 * the FW ignores this flag in a BD not marked start
9417 */
9418 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9419 DP(NETIF_MSG_TX_QUEUED,
9420 "TSO split data size is %d (%x:%x)\n",
9421 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9422
9423 /* update tx_bd for marking the last BD flag */
9424 *tx_bd = d_tx_bd;
9425
9426 return bd_prod;
9427}
9428
9429static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9430{
9431 if (fix > 0)
9432 csum = (u16) ~csum_fold(csum_sub(csum,
9433 csum_partial(t_header - fix, fix, 0)));
9434
9435 else if (fix < 0)
9436 csum = (u16) ~csum_fold(csum_add(csum,
9437 csum_partial(t_header, -fix, 0)));
9438
9439 return swab16(csum);
9440}
9441
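/* Classify an outgoing skb into XMIT_* bits: XMIT_PLAIN, IPv4/IPv6
 * checksum offload (with XMIT_CSUM_TCP for TCP), and GSO v4/v6. The
 * result drives the parsing-BD setup in bnx2x_start_xmit().
 */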
9442static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9443{
9444 u32 rc;
9445
9446 if (skb->ip_summed != CHECKSUM_PARTIAL)
9447 rc = XMIT_PLAIN;
9448
9449 else {
9450 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9451 rc = XMIT_CSUM_V6;
9452 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9453 rc |= XMIT_CSUM_TCP;
9454
9455 } else {
9456 rc = XMIT_CSUM_V4;
9457 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9458 rc |= XMIT_CSUM_TCP;
9459 }
9460 }
9461
9462 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9463 rc |= XMIT_GSO_V4;
9464
9465 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9466 rc |= XMIT_GSO_V6;
9467
9468 return rc;
9469}
9470
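/* The firmware can fetch at most MAX_FETCH_BD BDs per packet. For LSO,
 * every window of (MAX_FETCH_BD - 3) consecutive fragments must also
 * cover at least one MSS of payload; the sliding-window sums below
 * detect violations and force skb_linearize().
 */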
632da4d6 9471#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9472/* check if packet requires linearization (packet is too fragmented) */
9473static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9474 u32 xmit_type)
9475{
9476 int to_copy = 0;
9477 int hlen = 0;
9478 int first_bd_sz = 0;
9479
9480 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9481 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9482
9483 if (xmit_type & XMIT_GSO) {
9484 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9485 /* Check if LSO packet needs to be copied:
9486 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9487 int wnd_size = MAX_FETCH_BD - 3;
33471629 9488 /* Number of windows to check */
9489 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9490 int wnd_idx = 0;
9491 int frag_idx = 0;
9492 u32 wnd_sum = 0;
9493
9494 /* Headers length */
9495 hlen = (int)(skb_transport_header(skb) - skb->data) +
9496 tcp_hdrlen(skb);
9497
 9498 			/* Amount of data (w/o headers) on the linear part of the SKB */
9499 first_bd_sz = skb_headlen(skb) - hlen;
9500
9501 wnd_sum = first_bd_sz;
9502
9503 /* Calculate the first sum - it's special */
9504 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9505 wnd_sum +=
9506 skb_shinfo(skb)->frags[frag_idx].size;
9507
9508 /* If there was data on linear skb data - check it */
9509 if (first_bd_sz > 0) {
9510 if (unlikely(wnd_sum < lso_mss)) {
9511 to_copy = 1;
9512 goto exit_lbl;
9513 }
9514
9515 wnd_sum -= first_bd_sz;
9516 }
9517
9518 /* Others are easier: run through the frag list and
9519 check all windows */
9520 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9521 wnd_sum +=
9522 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9523
9524 if (unlikely(wnd_sum < lso_mss)) {
9525 to_copy = 1;
9526 break;
9527 }
9528 wnd_sum -=
9529 skb_shinfo(skb)->frags[wnd_idx].size;
9530 }
9531
9532 } else {
 9533 			/* a non-LSO packet that is too fragmented
 9534 			   should always be linearized */
9535 to_copy = 1;
9536 }
9537 }
9538
9539exit_lbl:
9540 if (unlikely(to_copy))
9541 DP(NETIF_MSG_TX_QUEUED,
9542 "Linearization IS REQUIRED for %s packet. "
9543 "num_frags %d hlen %d first_bd_sz %d\n",
9544 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9545 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9546
9547 return to_copy;
9548}
632da4d6 9549#endif
9550
9551/* called with netif_tx_lock
a2fbb9ea 9552 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9553 * netif_wake_queue()
9554 */
9555static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9556{
9557 struct bnx2x *bp = netdev_priv(dev);
9558 struct bnx2x_fastpath *fp;
9559 struct sw_tx_bd *tx_buf;
9560 struct eth_tx_bd *tx_bd;
9561 struct eth_tx_parse_bd *pbd = NULL;
9562 u16 pkt_prod, bd_prod;
755735eb 9563 int nbd, fp_index;
a2fbb9ea 9564 dma_addr_t mapping;
9565 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9566 int vlan_off = (bp->e1hov ? 4 : 0);
9567 int i;
9568 u8 hlen = 0;
9569
9570#ifdef BNX2X_STOP_ON_ERROR
9571 if (unlikely(bp->panic))
9572 return NETDEV_TX_BUSY;
9573#endif
9574
755735eb 9575 fp_index = (smp_processor_id() % bp->num_queues);
a2fbb9ea 9576 fp = &bp->fp[fp_index];
755735eb 9577
231fd58a 9578 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9579 		bp->eth_stats.driver_xoff++;
9580 netif_stop_queue(dev);
9581 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9582 return NETDEV_TX_BUSY;
9583 }
9584
9585 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9586 " gso type %x xmit_type %x\n",
9587 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9588 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9589
632da4d6 9590#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 9591 /* First, check if we need to linearize the skb
9592 (due to FW restrictions) */
9593 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9594 /* Statistics of linearization */
9595 bp->lin_cnt++;
9596 if (skb_linearize(skb) != 0) {
9597 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9598 "silently dropping this SKB\n");
9599 dev_kfree_skb_any(skb);
da5a662a 9600 return NETDEV_TX_OK;
9601 }
9602 }
632da4d6 9603#endif
755735eb 9604
a2fbb9ea 9605 /*
755735eb 9606 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9607 then for TSO or xsum we have a parsing info BD,
755735eb 9608 and only then we have the rest of the TSO BDs.
9609 (don't forget to mark the last one as last,
9610 and to unmap only AFTER you write to the BD ...)
755735eb 9611 	   And above all, all PBD sizes are in words - NOT DWORDS!
9612 */
9613
9614 pkt_prod = fp->tx_pkt_prod++;
755735eb 9615 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9616
755735eb 9617 /* get a tx_buf and first BD */
9618 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9619 tx_bd = &fp->tx_desc_ring[bd_prod];
9620
9621 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9622 tx_bd->general_data = (UNICAST_ADDRESS <<
9623 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9624 /* header nbd */
9625 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 9626
9627 /* remember the first BD of the packet */
9628 tx_buf->first_bd = fp->tx_bd_prod;
9629 tx_buf->skb = skb;
9630
9631 DP(NETIF_MSG_TX_QUEUED,
9632 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9633 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9634
9635#ifdef BCM_VLAN
9636 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9637 (bp->flags & HW_VLAN_TX_FLAG)) {
9638 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9639 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9640 vlan_off += 4;
9641 } else
0c6671b0 9642#endif
755735eb 9643 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9644
755735eb 9645 if (xmit_type) {
755735eb 9646 /* turn on parsing and get a BD */
9647 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9648 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9649
9650 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9651 }
9652
9653 if (xmit_type & XMIT_CSUM) {
9654 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9655
9656 /* for now NS flag is not used in Linux */
755735eb 9657 pbd->global_data = (hlen |
96fc1784 9658 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9659 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9660
9661 pbd->ip_hlen = (skb_transport_header(skb) -
9662 skb_network_header(skb)) / 2;
9663
9664 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9665
9666 pbd->total_hlen = cpu_to_le16(hlen);
9667 hlen = hlen*2 - vlan_off;
a2fbb9ea 9668
9669 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9670
9671 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9672 tx_bd->bd_flags.as_bitfield |=
9673 ETH_TX_BD_FLAGS_IP_CSUM;
9674 else
9675 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9676
9677 if (xmit_type & XMIT_CSUM_TCP) {
9678 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9679
9680 } else {
9681 s8 fix = SKB_CS_OFF(skb); /* signed! */
9682
a2fbb9ea 9683 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9684 pbd->cs_offset = fix / 2;
a2fbb9ea 9685
9686 DP(NETIF_MSG_TX_QUEUED,
9687 "hlen %d offset %d fix %d csum before fix %x\n",
9688 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9689 SKB_CS(skb));
9690
9691 /* HW bug: fixup the CSUM */
9692 pbd->tcp_pseudo_csum =
9693 bnx2x_csum_fix(skb_transport_header(skb),
9694 SKB_CS(skb), fix);
9695
9696 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9697 pbd->tcp_pseudo_csum);
9698 }
9699 }
9700
9701 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9702 skb_headlen(skb), PCI_DMA_TODEVICE);
9703
9704 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9705 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 9706 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9707 tx_bd->nbd = cpu_to_le16(nbd);
9708 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9709
9710 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9711 " nbytes %d flags %x vlan %x\n",
9712 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9713 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9714 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9715
755735eb 9716 if (xmit_type & XMIT_GSO) {
9717
9718 DP(NETIF_MSG_TX_QUEUED,
9719 "TSO packet len %d hlen %d total len %d tso size %d\n",
9720 skb->len, hlen, skb_headlen(skb),
9721 skb_shinfo(skb)->gso_size);
9722
9723 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9724
9725 if (unlikely(skb_headlen(skb) > hlen))
9726 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9727 bd_prod, ++nbd);
9728
9729 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9730 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9731 pbd->tcp_flags = pbd_tcp_flags(skb);
9732
9733 if (xmit_type & XMIT_GSO_V4) {
9734 pbd->ip_id = swab16(ip_hdr(skb)->id);
9735 pbd->tcp_pseudo_csum =
9736 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9737 ip_hdr(skb)->daddr,
9738 0, IPPROTO_TCP, 0));
9739
9740 } else
9741 pbd->tcp_pseudo_csum =
9742 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9743 &ipv6_hdr(skb)->daddr,
9744 0, IPPROTO_TCP, 0));
9745
9746 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9747 }
9748
9749 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9750 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9751
9752 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9753 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9754
9755 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9756 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9757
9758 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9759 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9760 tx_bd->nbytes = cpu_to_le16(frag->size);
9761 tx_bd->vlan = cpu_to_le16(pkt_prod);
9762 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9763
9764 DP(NETIF_MSG_TX_QUEUED,
9765 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9766 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9767 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9768 }
9769
755735eb 9770 /* now at last mark the BD as the last BD */
9771 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9772
9773 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9774 tx_bd, tx_bd->bd_flags.as_bitfield);
9775
9776 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9777
755735eb 9778 /* now send a tx doorbell, counting the next BD
9779 * if the packet contains or ends with it
9780 */
9781 if (TX_BD_POFF(bd_prod) < nbd)
9782 nbd++;
9783
9784 if (pbd)
9785 DP(NETIF_MSG_TX_QUEUED,
9786 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9787 " tcp_flags %x xsum %x seq %u hlen %u\n",
9788 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9789 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9790 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9791
755735eb 9792 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9793
9794 /*
9795 * Make sure that the BD data is updated before updating the producer
9796 * since FW might read the BD right after the producer is updated.
9797 * This is only applicable for weak-ordered memory model archs such
 9798 	 * as IA-64. The following barrier is also mandatory since the FW
 9799 	 * assumes packets must have BDs.
9800 */
9801 wmb();
9802
9803 fp->hw_tx_prods->bds_prod =
9804 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9805 mb(); /* FW restriction: must not reorder writing nbd and packets */
9806 fp->hw_tx_prods->packets_prod =
9807 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9808 DOORBELL(bp, FP_IDX(fp), 0);
9809
9810 mmiowb();
9811
755735eb 9812 fp->tx_bd_prod += nbd;
9813 dev->trans_start = jiffies;
9814
9815 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9816 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9817 if we put Tx into XOFF state. */
9818 smp_mb();
a2fbb9ea 9819 netif_stop_queue(dev);
bb2a0f7a 9820 bp->eth_stats.driver_xoff++;
9821 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9822 netif_wake_queue(dev);
9823 }
9824 fp->tx_pkt++;
9825
9826 return NETDEV_TX_OK;
9827}
9828
bb2a0f7a 9829/* called with rtnl_lock */
9830static int bnx2x_open(struct net_device *dev)
9831{
9832 struct bnx2x *bp = netdev_priv(dev);
9833
9834 netif_carrier_off(dev);
9835
9836 bnx2x_set_power_state(bp, PCI_D0);
9837
bb2a0f7a 9838 return bnx2x_nic_load(bp, LOAD_OPEN);
9839}
9840
bb2a0f7a 9841/* called with rtnl_lock */
9842static int bnx2x_close(struct net_device *dev)
9843{
9844 struct bnx2x *bp = netdev_priv(dev);
9845
9846 /* Unload the driver, release IRQs */
9847 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9848 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9849 if (!CHIP_REV_IS_SLOW(bp))
9850 bnx2x_set_power_state(bp, PCI_D3hot);
9851
9852 return 0;
9853}
9854
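/* Rx filtering policy. On E1 each multicast address is programmed into
 * the CAM through a SET_MAC ramrod (stale entries are invalidated); on
 * E1H the address is hashed with crc32c and one bit of the 256-bit MC
 * hash filter in the NIG is set instead.
 */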
9855/* called with netif_tx_lock from set_multicast */
9856static void bnx2x_set_rx_mode(struct net_device *dev)
9857{
9858 struct bnx2x *bp = netdev_priv(dev);
9859 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9860 int port = BP_PORT(bp);
9861
9862 if (bp->state != BNX2X_STATE_OPEN) {
9863 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9864 return;
9865 }
9866
9867 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9868
9869 if (dev->flags & IFF_PROMISC)
9870 rx_mode = BNX2X_RX_MODE_PROMISC;
9871
9872 else if ((dev->flags & IFF_ALLMULTI) ||
9873 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9874 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9875
9876 else { /* some multicasts */
9877 if (CHIP_IS_E1(bp)) {
9878 int i, old, offset;
9879 struct dev_mc_list *mclist;
9880 struct mac_configuration_cmd *config =
9881 bnx2x_sp(bp, mcast_config);
9882
9883 for (i = 0, mclist = dev->mc_list;
9884 mclist && (i < dev->mc_count);
9885 i++, mclist = mclist->next) {
9886
9887 config->config_table[i].
9888 cam_entry.msb_mac_addr =
9889 swab16(*(u16 *)&mclist->dmi_addr[0]);
9890 config->config_table[i].
9891 cam_entry.middle_mac_addr =
9892 swab16(*(u16 *)&mclist->dmi_addr[2]);
9893 config->config_table[i].
9894 cam_entry.lsb_mac_addr =
9895 swab16(*(u16 *)&mclist->dmi_addr[4]);
9896 config->config_table[i].cam_entry.flags =
9897 cpu_to_le16(port);
9898 config->config_table[i].
9899 target_table_entry.flags = 0;
9900 config->config_table[i].
9901 target_table_entry.client_id = 0;
9902 config->config_table[i].
9903 target_table_entry.vlan_id = 0;
9904
9905 DP(NETIF_MSG_IFUP,
9906 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
9907 config->config_table[i].
9908 cam_entry.msb_mac_addr,
9909 config->config_table[i].
9910 cam_entry.middle_mac_addr,
9911 config->config_table[i].
9912 cam_entry.lsb_mac_addr);
9913 }
9914 old = config->hdr.length_6b;
9915 if (old > i) {
9916 for (; i < old; i++) {
9917 if (CAM_IS_INVALID(config->
9918 config_table[i])) {
af246401 9919 /* already invalidated */
9920 break;
9921 }
9922 /* invalidate */
9923 CAM_INVALIDATE(config->
9924 config_table[i]);
9925 }
9926 }
9927
9928 if (CHIP_REV_IS_SLOW(bp))
9929 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
9930 else
9931 offset = BNX2X_MAX_MULTICAST*(1 + port);
9932
9933 config->hdr.length_6b = i;
9934 config->hdr.offset = offset;
9935 config->hdr.client_id = BP_CL_ID(bp);
9936 config->hdr.reserved1 = 0;
9937
9938 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9939 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
9940 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
9941 0);
9942 } else { /* E1H */
9943 /* Accept one or more multicasts */
9944 struct dev_mc_list *mclist;
9945 u32 mc_filter[MC_HASH_SIZE];
9946 u32 crc, bit, regidx;
9947 int i;
9948
9949 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
9950
9951 for (i = 0, mclist = dev->mc_list;
9952 mclist && (i < dev->mc_count);
9953 i++, mclist = mclist->next) {
9954
9955 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
9956 mclist->dmi_addr);
9957
9958 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
9959 bit = (crc >> 24) & 0xff;
9960 regidx = bit >> 5;
9961 bit &= 0x1f;
9962 mc_filter[regidx] |= (1 << bit);
9963 }
9964
9965 for (i = 0; i < MC_HASH_SIZE; i++)
9966 REG_WR(bp, MC_HASH_OFFSET(bp, i),
9967 mc_filter[i]);
9968 }
9969 }
9970
9971 bp->rx_mode = rx_mode;
9972 bnx2x_set_storm_rx_mode(bp);
9973}
9974
9975/* called with rtnl_lock */
9976static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
9977{
9978 struct sockaddr *addr = p;
9979 struct bnx2x *bp = netdev_priv(dev);
9980
34f80b04 9981 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
9982 return -EINVAL;
9983
9984 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9985 if (netif_running(dev)) {
9986 if (CHIP_IS_E1(bp))
3101c2bc 9987 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 9988 else
3101c2bc 9989 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 9990 }
9991
9992 return 0;
9993}
9994
c18487ee 9995/* called with rtnl_lock */
9996static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9997{
9998 struct mii_ioctl_data *data = if_mii(ifr);
9999 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10000 int port = BP_PORT(bp);
10001 int err;
10002
10003 switch (cmd) {
10004 case SIOCGMIIPHY:
34f80b04 10005 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10006
c14423fe 10007 /* fallthrough */
c18487ee 10008
a2fbb9ea 10009 case SIOCGMIIREG: {
c18487ee 10010 u16 mii_regval;
a2fbb9ea 10011
10012 if (!netif_running(dev))
10013 return -EAGAIN;
a2fbb9ea 10014
34f80b04 10015 mutex_lock(&bp->port.phy_mutex);
3196a88a 10016 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10017 DEFAULT_PHY_DEV_ADDR,
10018 (data->reg_num & 0x1f), &mii_regval);
10019 data->val_out = mii_regval;
34f80b04 10020 mutex_unlock(&bp->port.phy_mutex);
10021 return err;
10022 }
10023
10024 case SIOCSMIIREG:
10025 if (!capable(CAP_NET_ADMIN))
10026 return -EPERM;
10027
10028 if (!netif_running(dev))
10029 return -EAGAIN;
10030
34f80b04 10031 mutex_lock(&bp->port.phy_mutex);
3196a88a 10032 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10033 DEFAULT_PHY_DEV_ADDR,
10034 (data->reg_num & 0x1f), data->val_in);
34f80b04 10035 mutex_unlock(&bp->port.phy_mutex);
10036 return err;
10037
10038 default:
10039 /* do nothing */
10040 break;
10041 }
10042
10043 return -EOPNOTSUPP;
10044}
10045
34f80b04 10046/* called with rtnl_lock */
10047static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10048{
10049 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10050 int rc = 0;
10051
10052 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10053 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10054 return -EINVAL;
10055
10056 /* This does not race with packet allocation
c14423fe 10057 * because the actual alloc size is
10058 * only updated as part of load
10059 */
10060 dev->mtu = new_mtu;
10061
10062 if (netif_running(dev)) {
10063 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10064 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10065 }
10066
10067 return rc;
10068}
10069
10070static void bnx2x_tx_timeout(struct net_device *dev)
10071{
10072 struct bnx2x *bp = netdev_priv(dev);
10073
10074#ifdef BNX2X_STOP_ON_ERROR
10075 if (!bp->panic)
10076 bnx2x_panic();
10077#endif
10078 	/* This allows the netif to be shut down gracefully before resetting */
10079 schedule_work(&bp->reset_task);
10080}
10081
10082#ifdef BCM_VLAN
34f80b04 10083/* called with rtnl_lock */
10084static void bnx2x_vlan_rx_register(struct net_device *dev,
10085 struct vlan_group *vlgrp)
10086{
10087 struct bnx2x *bp = netdev_priv(dev);
10088
10089 bp->vlgrp = vlgrp;
10090
10091 /* Set flags according to the required capabilities */
10092 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10093
10094 if (dev->features & NETIF_F_HW_VLAN_TX)
10095 bp->flags |= HW_VLAN_TX_FLAG;
10096
10097 if (dev->features & NETIF_F_HW_VLAN_RX)
10098 bp->flags |= HW_VLAN_RX_FLAG;
10099
a2fbb9ea 10100 if (netif_running(dev))
49d66772 10101 bnx2x_set_client_config(bp);
a2fbb9ea 10102}
34f80b04 10103
10104#endif
10105
10106#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10107static void poll_bnx2x(struct net_device *dev)
10108{
10109 struct bnx2x *bp = netdev_priv(dev);
10110
10111 disable_irq(bp->pdev->irq);
10112 bnx2x_interrupt(bp->pdev->irq, dev);
10113 enable_irq(bp->pdev->irq);
10114}
10115#endif
10116
10117static const struct net_device_ops bnx2x_netdev_ops = {
10118 .ndo_open = bnx2x_open,
10119 .ndo_stop = bnx2x_close,
10120 .ndo_start_xmit = bnx2x_start_xmit,
10121 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10122 .ndo_set_mac_address = bnx2x_change_mac_addr,
10123 .ndo_validate_addr = eth_validate_addr,
10124 .ndo_do_ioctl = bnx2x_ioctl,
10125 .ndo_change_mtu = bnx2x_change_mtu,
10126 .ndo_tx_timeout = bnx2x_tx_timeout,
10127#ifdef BCM_VLAN
10128 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10129#endif
10130#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10131 .ndo_poll_controller = poll_bnx2x,
10132#endif
10133};
10134
10135
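/* One-time PCI/netdev bring-up: enable the device, map BAR 0 as the
 * register window and up to BNX2X_DB_SIZE of BAR 2 as the doorbell
 * window, select 64-bit DMA when available, clean the PXP2 indirect
 * address registers and fill in netdev ops and feature flags.
 */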
10136static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10137 struct net_device *dev)
10138{
10139 struct bnx2x *bp;
10140 int rc;
10141
10142 SET_NETDEV_DEV(dev, &pdev->dev);
10143 bp = netdev_priv(dev);
10144
10145 bp->dev = dev;
10146 bp->pdev = pdev;
a2fbb9ea 10147 bp->flags = 0;
34f80b04 10148 bp->func = PCI_FUNC(pdev->devfn);
10149
10150 rc = pci_enable_device(pdev);
10151 if (rc) {
10152 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10153 goto err_out;
10154 }
10155
10156 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10157 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10158 " aborting\n");
10159 rc = -ENODEV;
10160 goto err_out_disable;
10161 }
10162
10163 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10164 printk(KERN_ERR PFX "Cannot find second PCI device"
10165 " base address, aborting\n");
10166 rc = -ENODEV;
10167 goto err_out_disable;
10168 }
10169
10170 if (atomic_read(&pdev->enable_cnt) == 1) {
10171 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10172 if (rc) {
10173 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10174 " aborting\n");
10175 goto err_out_disable;
10176 }
a2fbb9ea 10177
10178 pci_set_master(pdev);
10179 pci_save_state(pdev);
10180 }
10181
10182 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10183 if (bp->pm_cap == 0) {
10184 printk(KERN_ERR PFX "Cannot find power management"
10185 " capability, aborting\n");
10186 rc = -EIO;
10187 goto err_out_release;
10188 }
10189
10190 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10191 if (bp->pcie_cap == 0) {
10192 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10193 " aborting\n");
10194 rc = -EIO;
10195 goto err_out_release;
10196 }
10197
10198 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10199 bp->flags |= USING_DAC_FLAG;
10200 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10201 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10202 " failed, aborting\n");
10203 rc = -EIO;
10204 goto err_out_release;
10205 }
10206
10207 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10208 printk(KERN_ERR PFX "System does not support DMA,"
10209 " aborting\n");
10210 rc = -EIO;
10211 goto err_out_release;
10212 }
10213
10214 dev->mem_start = pci_resource_start(pdev, 0);
10215 dev->base_addr = dev->mem_start;
10216 dev->mem_end = pci_resource_end(pdev, 0);
10217
10218 dev->irq = pdev->irq;
10219
275f165f 10220 bp->regview = pci_ioremap_bar(pdev, 0);
10221 if (!bp->regview) {
10222 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10223 rc = -ENOMEM;
10224 goto err_out_release;
10225 }
10226
10227 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10228 min_t(u64, BNX2X_DB_SIZE,
10229 pci_resource_len(pdev, 2)));
10230 if (!bp->doorbells) {
10231 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10232 rc = -ENOMEM;
10233 goto err_out_unmap;
10234 }
10235
10236 bnx2x_set_power_state(bp, PCI_D0);
10237
10238 /* clean indirect addresses */
10239 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10240 PCICFG_VENDOR_ID_OFFSET);
10241 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10242 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10243 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10244 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10245
34f80b04 10246 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10247
c64213cd 10248 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10249 dev->ethtool_ops = &bnx2x_ethtool_ops;
10250 dev->features |= NETIF_F_SG;
10251 dev->features |= NETIF_F_HW_CSUM;
10252 if (bp->flags & USING_DAC_FLAG)
10253 dev->features |= NETIF_F_HIGHDMA;
10254#ifdef BCM_VLAN
10255 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10256 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10257#endif
10258 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10259 dev->features |= NETIF_F_TSO6;
10260
10261 return 0;
10262
10263err_out_unmap:
10264 if (bp->regview) {
10265 iounmap(bp->regview);
10266 bp->regview = NULL;
10267 }
10268 if (bp->doorbells) {
10269 iounmap(bp->doorbells);
10270 bp->doorbells = NULL;
10271 }
10272
10273err_out_release:
10274 if (atomic_read(&pdev->enable_cnt) == 1)
10275 pci_release_regions(pdev);
10276
10277err_out_disable:
10278 pci_disable_device(pdev);
10279 pci_set_drvdata(pdev, NULL);
10280
10281err_out:
10282 return rc;
10283}
10284
10285static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10286{
10287 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10288
10289 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10290 return val;
10291}

/* return value: 1 = 2.5GHz, 2 = 5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

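/* teardown mirrors bnx2x_init_one in reverse: unregister the netdev
 * first so no further I/O can be started, then release the MMIO
 * mappings and PCI resources */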
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

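/* PM entry points: config space is saved/restored under rtnl_lock, and
 * the NIC itself is unloaded/reloaded only if the interface was running */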
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
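
/* minimal unload used by the EEH error handler: the PCI link may
 * already be dead, so this path skips the normal MCP unload handshake
 * and only unwinds host-side state */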
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
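	/* delete the per-queue NAPI contexts that were added at load time;
	 * leaving them registered would corrupt the netdev's napi_list
	 * when they are re-added on the next load */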
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
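
/* after a slot reset, rediscover the MCP shared memory; a shmem base
 * outside the expected window means the MCP is not running and the
 * driver must continue with NO_MCP_FLAG set */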
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
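
/* create the module-wide workqueue before registering the driver, since
 * probe of an already-present device may queue work immediately */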
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);