bnx2x: Missing memory barriers
drivers/net/bnx2x_main.c
1/* bnx2x_main.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2008 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
41#ifdef NETIF_F_HW_VLAN_TX
42 #include <linux/if_vlan.h>
43#endif
44#include <net/ip.h>
45#include <net/tcp.h>
46#include <net/checksum.h>
47#include <net/ip6_checksum.h>
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/crc32c.h>
51#include <linux/prefetch.h>
52#include <linux/zlib.h>
53#include <linux/io.h>
54
55#include "bnx2x_reg.h"
56#include "bnx2x_fw_defs.h"
57#include "bnx2x_hsi.h"
58#include "bnx2x_link.h"
59#include "bnx2x.h"
60#include "bnx2x_init.h"
61
62#define DRV_MODULE_VERSION "1.45.23"
63#define DRV_MODULE_RELDATE "2008/11/03"
64#define BNX2X_BC_VER 0x040200
65
66/* Time in jiffies before concluding the transmitter is hung */
67#define TX_TIMEOUT (5*HZ)
68
69static char version[] __devinitdata =
70 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
71 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
72
73MODULE_AUTHOR("Eliezer Tamir");
74MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
75MODULE_LICENSE("GPL");
76MODULE_VERSION(DRV_MODULE_VERSION);
77
78static int disable_tpa;
79static int use_inta;
80static int poll;
81static int debug;
82static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
83static int use_multi;
84
85module_param(disable_tpa, int, 0);
86module_param(use_inta, int, 0);
87module_param(poll, int, 0);
88module_param(debug, int, 0);
89MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
90MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
91MODULE_PARM_DESC(poll, "use polling (for debug)");
92MODULE_PARM_DESC(debug, "default debug msglevel");
93
94#ifdef BNX2X_MULTI
95module_param(use_multi, int, 0);
96MODULE_PARM_DESC(use_multi, "use per-CPU queues");
97#endif
98static struct workqueue_struct *bnx2x_wq;
99
100enum bnx2x_board_type {
101 BCM57710 = 0,
102 BCM57711 = 1,
103 BCM57711E = 2,
104};
105
106/* indexed by board_type, above */
107static struct {
108 char *name;
109} board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM57710 XGb" },
111 { "Broadcom NetXtreme II BCM57711 XGb" },
112 { "Broadcom NetXtreme II BCM57711E XGb" }
113};
114
115
116static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
122 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
123 { 0 }
124};
125
126MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
127
128/****************************************************************************
129* General service functions
130****************************************************************************/
131
132/* used only at init
133 * locking is done by mcp
134 */
135static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
136{
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
138 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
139 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
140 PCICFG_VENDOR_ID_OFFSET);
141}
142
143static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144{
145 u32 val;
146
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
148 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
149 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
150 PCICFG_VENDOR_ID_OFFSET);
151
152 return val;
153}
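/* Note: both indirect-access helpers above tunnel the access through the
 * GRC address/data window in PCI config space; the final write parks the
 * window back at PCICFG_VENDOR_ID_OFFSET, presumably so that an unrelated
 * config-space access cannot land inside chip internals. */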
154
155static const u32 dmae_reg_go_c[] = {
156 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
157 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
158 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
159 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160};
161
162/* copy command into DMAE command memory and set DMAE command go */
163static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
164 int idx)
165{
166 u32 cmd_offset;
167 int i;
168
169 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
170 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
171 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
172
173 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
174 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
175 }
176 REG_WR(bp, dmae_reg_go_c[idx], 1);
177}
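/* The command image is copied into the engine's command memory one dword
 * at a time; the single write to dmae_reg_go_c[idx] is what actually kicks
 * the DMAE channel, so it must come last. */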
178
179void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
180 u32 len32)
181{
182 struct dmae_command *dmae = &bp->init_dmae;
183 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
184 int cnt = 200;
185
186 if (!bp->dmae_ready) {
187 u32 *data = bnx2x_sp(bp, wb_data[0]);
188
189 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
190 " using indirect\n", dst_addr, len32);
191 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
192 return;
193 }
194
195 mutex_lock(&bp->dmae_mutex);
196
197 memset(dmae, 0, sizeof(struct dmae_command));
198
199 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
200 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
201 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
202#ifdef __BIG_ENDIAN
203 DMAE_CMD_ENDIANITY_B_DW_SWAP |
204#else
205 DMAE_CMD_ENDIANITY_DW_SWAP |
206#endif
207 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
208 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
209 dmae->src_addr_lo = U64_LO(dma_addr);
210 dmae->src_addr_hi = U64_HI(dma_addr);
211 dmae->dst_addr_lo = dst_addr >> 2;
212 dmae->dst_addr_hi = 0;
213 dmae->len = len32;
214 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
215 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
216 dmae->comp_val = DMAE_COMP_VAL;
217
218 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
219 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
220 "dst_addr [%x:%08x (%08x)]\n"
221 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
222 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
223 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
224 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
225 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
226 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
227 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
228
229 *wb_comp = 0;
230
231 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
232
233 udelay(5);
234
235 while (*wb_comp != DMAE_COMP_VAL) {
236 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
237
238 if (!cnt) {
239 BNX2X_ERR("dmae timeout!\n");
240 break;
241 }
242 cnt--;
243 /* adjust delay for emulation/FPGA */
244 if (CHIP_REV_IS_SLOW(bp))
245 msleep(100);
246 else
247 udelay(5);
248 }
249
250 mutex_unlock(&bp->dmae_mutex);
251}
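/* Completion protocol: the DMAE engine writes DMAE_COMP_VAL to the wb_comp
 * word in host memory when the copy is done, and the driver polls that word
 * (up to 200 iterations, with a longer delay on emulation/FPGA) rather than
 * taking an interrupt. */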
252
253void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
254{
255 struct dmae_command *dmae = &bp->init_dmae;
256 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
257 int cnt = 200;
258
259 if (!bp->dmae_ready) {
260 u32 *data = bnx2x_sp(bp, wb_data[0]);
261 int i;
262
263 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
264 " using indirect\n", src_addr, len32);
265 for (i = 0; i < len32; i++)
266 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
267 return;
268 }
269
270 mutex_lock(&bp->dmae_mutex);
271
272 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
273 memset(dmae, 0, sizeof(struct dmae_command));
274
275 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
276 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
277 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
278#ifdef __BIG_ENDIAN
279 DMAE_CMD_ENDIANITY_B_DW_SWAP |
280#else
281 DMAE_CMD_ENDIANITY_DW_SWAP |
282#endif
283 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
284 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
285 dmae->src_addr_lo = src_addr >> 2;
286 dmae->src_addr_hi = 0;
287 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
288 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
289 dmae->len = len32;
290 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
291 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
292 dmae->comp_val = DMAE_COMP_VAL;
293
294 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
295 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
296 "dst_addr [%x:%08x (%08x)]\n"
297 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
298 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
299 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
300 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
301
302 *wb_comp = 0;
303
304 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
305
306 udelay(5);
307
308 while (*wb_comp != DMAE_COMP_VAL) {
309
310 if (!cnt) {
311 BNX2X_ERR("dmae timeout!\n");
312 break;
313 }
314 cnt--;
315 /* adjust delay for emulation/FPGA */
316 if (CHIP_REV_IS_SLOW(bp))
317 msleep(100);
318 else
319 udelay(5);
320 }
321 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
322 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
323 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
324
325 mutex_unlock(&bp->dmae_mutex);
326}
327
328/* used only for slowpath so not inlined */
329static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
330{
331 u32 wb_write[2];
332
333 wb_write[0] = val_hi;
334 wb_write[1] = val_lo;
335 REG_WR_DMAE(bp, reg, wb_write, 2);
336}
337
338#ifdef USE_WB_RD
339static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
340{
341 u32 wb_data[2];
342
343 REG_RD_DMAE(bp, reg, wb_data, 2);
344
345 return HILO_U64(wb_data[0], wb_data[1]);
346}
347#endif
348
349static int bnx2x_mc_assert(struct bnx2x *bp)
350{
351 char last_idx;
352 int i, rc = 0;
353 u32 row0, row1, row2, row3;
354
355 /* XSTORM */
356 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
357 XSTORM_ASSERT_LIST_INDEX_OFFSET);
358 if (last_idx)
359 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
360
361 /* print the asserts */
362 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
363
364 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i));
366 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
368 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
370 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
371 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
372
373 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
374 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
375 " 0x%08x 0x%08x 0x%08x\n",
376 i, row3, row2, row1, row0);
377 rc++;
378 } else {
379 break;
380 }
381 }
382
383 /* TSTORM */
384 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
385 TSTORM_ASSERT_LIST_INDEX_OFFSET);
386 if (last_idx)
387 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
388
389 /* print the asserts */
390 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
391
392 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i));
394 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
396 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
398 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
399 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
400
401 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
402 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
403 " 0x%08x 0x%08x 0x%08x\n",
404 i, row3, row2, row1, row0);
405 rc++;
406 } else {
407 break;
408 }
409 }
410
411 /* CSTORM */
412 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
413 CSTORM_ASSERT_LIST_INDEX_OFFSET);
414 if (last_idx)
415 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
416
417 /* print the asserts */
418 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
419
420 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i));
422 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
424 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
426 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
427 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
428
429 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
430 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
431 " 0x%08x 0x%08x 0x%08x\n",
432 i, row3, row2, row1, row0);
433 rc++;
434 } else {
435 break;
436 }
437 }
438
439 /* USTORM */
440 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
441 USTORM_ASSERT_LIST_INDEX_OFFSET);
442 if (last_idx)
443 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
444
445 /* print the asserts */
446 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
447
448 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i));
450 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 4);
452 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 8);
454 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
455 USTORM_ASSERT_LIST_OFFSET(i) + 12);
456
457 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
458 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
459 " 0x%08x 0x%08x 0x%08x\n",
460 i, row3, row2, row1, row0);
461 rc++;
462 } else {
463 break;
464 }
465 }
466
467 return rc;
468}
469
470static void bnx2x_fw_dump(struct bnx2x *bp)
471{
472 u32 mark, offset;
473 u32 data[9];
474 int word;
475
476 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
477 mark = ((mark + 0x3) & ~0x3);
478 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
479
480 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
481 for (word = 0; word < 8; word++)
482 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
483 offset + 4*word));
484 data[8] = 0x0;
485 printk(KERN_CONT "%s", (char *)data);
486 }
487 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
488 for (word = 0; word < 8; word++)
489 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
490 offset + 4*word));
491 data[8] = 0x0;
492 printk(KERN_CONT "%s", (char *)data);
493 }
494 printk("\n" KERN_ERR PFX "end of fw dump\n");
495}
496
497static void bnx2x_panic_dump(struct bnx2x *bp)
498{
499 int i;
500 u16 j, start, end;
501
502 bp->stats_state = STATS_STATE_DISABLED;
503 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
504
505 BNX2X_ERR("begin crash dump -----------------\n");
506
507 for_each_queue(bp, i) {
508 struct bnx2x_fastpath *fp = &bp->fp[i];
509 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
510
511 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
512 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
513 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
514 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
515 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
516 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
517 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
518 fp->rx_bd_prod, fp->rx_bd_cons,
519 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
520 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
521 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
522 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
523 " *sb_u_idx(%x) bd data(%x,%x)\n",
524 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
525 fp->status_blk->c_status_block.status_block_index,
526 fp->fp_u_idx,
527 fp->status_blk->u_status_block.status_block_index,
528 hw_prods->packets_prod, hw_prods->bds_prod);
529
530 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
531 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
532 for (j = start; j < end; j++) {
533 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
534
535 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
536 sw_bd->skb, sw_bd->first_bd);
537 }
538
539 start = TX_BD(fp->tx_bd_cons - 10);
540 end = TX_BD(fp->tx_bd_cons + 254);
541 for (j = start; j < end; j++) {
542 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
543
544 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
545 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
546 }
547
548 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
549 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
550 for (j = start; j < end; j++) {
551 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
552 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
553
554 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
555 j, rx_bd[1], rx_bd[0], sw_bd->skb);
556 }
557
558 start = RX_SGE(fp->rx_sge_prod);
559 end = RX_SGE(fp->last_max_sge);
560 for (j = start; j < end; j++) {
561 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
562 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
563
564 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
565 j, rx_sge[1], rx_sge[0], sw_page->page);
566 }
567
568 start = RCQ_BD(fp->rx_comp_cons - 10);
569 end = RCQ_BD(fp->rx_comp_cons + 503);
570 for (j = start; j < end; j++) {
571 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
572
573 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
574 j, cqe[0], cqe[1], cqe[2], cqe[3]);
575 }
576 }
577
578 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
579 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
580 " spq_prod_idx(%u)\n",
581 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
582 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
583
584 bnx2x_fw_dump(bp);
585 bnx2x_mc_assert(bp);
586 BNX2X_ERR("end crash dump -----------------\n");
587}
588
589static void bnx2x_int_enable(struct bnx2x *bp)
590{
591 int port = BP_PORT(bp);
592 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
593 u32 val = REG_RD(bp, addr);
594 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
595
596 if (msix) {
597 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
600 } else {
601 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
602 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
603 HC_CONFIG_0_REG_INT_LINE_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
605
606 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
607 val, port, addr, msix);
608
609 REG_WR(bp, addr, val);
610
611 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
612 }
613
614 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
615 val, port, addr, msix);
616
617 REG_WR(bp, addr, val);
618
619 if (CHIP_IS_E1H(bp)) {
620 /* init leading/trailing edge */
621 if (IS_E1HMF(bp)) {
622 val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
623 if (bp->port.pmf)
624 /* enable nig attention */
625 val |= 0x0100;
626 } else
627 val = 0xffff;
628
629 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
630 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
631 }
632}
633
634static void bnx2x_int_disable(struct bnx2x *bp)
635{
636 int port = BP_PORT(bp);
637 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
638 u32 val = REG_RD(bp, addr);
639
640 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
641 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642 HC_CONFIG_0_REG_INT_LINE_EN_0 |
643 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
644
645 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
646 val, port, addr);
647
648 REG_WR(bp, addr, val);
649 if (REG_RD(bp, addr) != val)
650 BNX2X_ERR("BUG! proper val not read from IGU!\n");
651}
652
653static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
654{
655 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
656 int i;
657
658 /* disable interrupt handling */
659 atomic_inc(&bp->intr_sem);
660 if (disable_hw)
661 /* prevent the HW from sending interrupts */
662 bnx2x_int_disable(bp);
663
664 /* make sure all ISRs are done */
665 if (msix) {
666 for_each_queue(bp, i)
667 synchronize_irq(bp->msix_table[i].vector);
668
669 /* one more for the Slow Path IRQ */
670 synchronize_irq(bp->msix_table[i].vector);
671 } else
672 synchronize_irq(bp->pdev->irq);
673
674 /* make sure sp_task is not running */
675 cancel_delayed_work(&bp->sp_task);
676 flush_workqueue(bnx2x_wq);
677}
678
679/* fast path */
680
681/*
682 * General service functions
683 */
684
685static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
686 u8 storm, u16 index, u8 op, u8 update)
687{
688 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
689 COMMAND_REG_INT_ACK);
690 struct igu_ack_register igu_ack;
691
692 igu_ack.status_block_index = index;
693 igu_ack.sb_id_and_flags =
694 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
695 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
696 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
697 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
698
699 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
700 (*(u32 *)&igu_ack), hc_addr);
701 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
702}
703
704static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
705{
706 struct host_status_block *fpsb = fp->status_blk;
707 u16 rc = 0;
708
709 barrier(); /* status block is written to by the chip */
710 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
711 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
712 rc |= 1;
713 }
714 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
715 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
716 rc |= 2;
717 }
718 return rc;
719}
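/* barrier() above is a compiler barrier only: the status block is written
 * by the chip via DMA, so the compiler must be kept from reusing a stale,
 * previously-read copy of the status_block_index fields. */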
720
721static u16 bnx2x_ack_int(struct bnx2x *bp)
722{
723 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
724 COMMAND_REG_SIMD_MASK);
725 u32 result = REG_RD(bp, hc_addr);
726
727 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
728 result, hc_addr);
729
730 return result;
731}
732
733
734/*
735 * fast path service functions
736 */
737
738/* free skb in the packet ring at pos idx
739 * return idx of last bd freed
740 */
741static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
742 u16 idx)
743{
744 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
745 struct eth_tx_bd *tx_bd;
746 struct sk_buff *skb = tx_buf->skb;
747 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
748 int nbd;
749
750 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
751 idx, tx_buf, skb);
752
753 /* unmap first bd */
754 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
755 tx_bd = &fp->tx_desc_ring[bd_idx];
756 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
757 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
758
759 nbd = le16_to_cpu(tx_bd->nbd) - 1;
760 new_cons = nbd + tx_buf->first_bd;
761#ifdef BNX2X_STOP_ON_ERROR
762 if (nbd > (MAX_SKB_FRAGS + 2)) {
763 BNX2X_ERR("BAD nbd!\n");
764 bnx2x_panic();
765 }
766#endif
767
768 /* Skip a parse bd and the TSO split header bd
769 since they have no mapping */
770 if (nbd)
771 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
772
773 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
774 ETH_TX_BD_FLAGS_TCP_CSUM |
775 ETH_TX_BD_FLAGS_SW_LSO)) {
776 if (--nbd)
777 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
778 tx_bd = &fp->tx_desc_ring[bd_idx];
779 /* is this a TSO split header bd? */
780 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
781 if (--nbd)
782 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
783 }
784 }
785
786 /* now free frags */
787 while (nbd > 0) {
788
789 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
790 tx_bd = &fp->tx_desc_ring[bd_idx];
791 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
792 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
793 if (--nbd)
794 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
795 }
796
797 /* release skb */
798 WARN_ON(!skb);
799 dev_kfree_skb(skb);
800 tx_buf->first_bd = 0;
801 tx_buf->skb = NULL;
802
803 return new_cons;
804}
805
806static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
807{
808 s16 used;
809 u16 prod;
810 u16 cons;
811
812 barrier(); /* Tell compiler that prod and cons can change */
813 prod = fp->tx_bd_prod;
814 cons = fp->tx_bd_cons;
815
816 /* NUM_TX_RINGS = number of "next-page" entries
817 It will be used as a threshold */
818 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
819
820#ifdef BNX2X_STOP_ON_ERROR
821 WARN_ON(used < 0);
822 WARN_ON(used > fp->bp->tx_ring_size);
823 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
824#endif
825
826 return (s16)(fp->bp->tx_ring_size) - used;
827}
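/* The "used" count deliberately includes the NUM_TX_RINGS next-page BDs,
 * which can never carry data, so the value returned here is a conservative
 * estimate of the BDs actually available to the transmit path. */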
828
829static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
830{
831 struct bnx2x *bp = fp->bp;
832 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
833 int done = 0;
834
835#ifdef BNX2X_STOP_ON_ERROR
836 if (unlikely(bp->panic))
837 return;
838#endif
839
840 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
841 sw_cons = fp->tx_pkt_cons;
842
843 while (sw_cons != hw_cons) {
844 u16 pkt_cons;
845
846 pkt_cons = TX_BD(sw_cons);
847
848 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
849
850 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
851 hw_cons, sw_cons, pkt_cons);
852
853/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
854 rmb();
855 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
856 }
857*/
858 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
859 sw_cons++;
860 done++;
861
862 if (done == work)
863 break;
864 }
865
866 fp->tx_pkt_cons = sw_cons;
867 fp->tx_bd_cons = bd_cons;
868
869 /* Need to make the tx_cons update visible to start_xmit()
870 * before checking for netif_queue_stopped(). Without the
871 * memory barrier, there is a small possibility that start_xmit()
872 * will miss it and cause the queue to be stopped forever.
873 */
874 smp_mb();
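	/* This smp_mb() pairs with the queue-full check on the transmit
	 * side (bnx2x_start_xmit(), not shown in this hunk): the consumer
	 * update must be globally visible before this CPU reads
	 * netif_queue_stopped(), or a stop/wake race could leave the
	 * queue stopped forever. */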
875
876 /* TBD need a thresh? */
877 if (unlikely(netif_queue_stopped(bp->dev))) {
878
879 netif_tx_lock(bp->dev);
880
881 if (netif_queue_stopped(bp->dev) &&
882 (bp->state == BNX2X_STATE_OPEN) &&
883 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
884 netif_wake_queue(bp->dev);
885
886 netif_tx_unlock(bp->dev);
887 }
888}
889
890
891static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
892 union eth_rx_cqe *rr_cqe)
893{
894 struct bnx2x *bp = fp->bp;
895 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
896 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
897
898 DP(BNX2X_MSG_SP,
899 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
900 FP_IDX(fp), cid, command, bp->state,
901 rr_cqe->ramrod_cqe.ramrod_type);
902
903 bp->spq_left++;
904
905 if (FP_IDX(fp)) {
906 switch (command | fp->state) {
907 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
908 BNX2X_FP_STATE_OPENING):
909 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
910 cid);
911 fp->state = BNX2X_FP_STATE_OPEN;
912 break;
913
914 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
915 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
916 cid);
917 fp->state = BNX2X_FP_STATE_HALTED;
918 break;
919
920 default:
921 BNX2X_ERR("unexpected MC reply (%d) "
922 "fp->state is %x\n", command, fp->state);
923 break;
924 }
925 mb(); /* force bnx2x_wait_ramrod() to see the change */
926 return;
927 }
928
929 switch (command | bp->state) {
930 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
931 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
932 bp->state = BNX2X_STATE_OPEN;
933 break;
934
935 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
936 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
937 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
938 fp->state = BNX2X_FP_STATE_HALTED;
939 break;
940
941 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
942 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
943 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
944 break;
945
946
947 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
948 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
949 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
950 bp->set_mac_pending = 0;
951 break;
952
953 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
954 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
955 break;
956
957 default:
958 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
959 command, bp->state);
960 break;
961 }
962 mb(); /* force bnx2x_wait_ramrod() to see the change */
963}
964
965static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
966 struct bnx2x_fastpath *fp, u16 index)
967{
968 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
969 struct page *page = sw_buf->page;
970 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
971
972 /* Skip "next page" elements */
973 if (!page)
974 return;
975
976 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
977 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
978 __free_pages(page, PAGES_PER_SGE_SHIFT);
979
980 sw_buf->page = NULL;
981 sge->addr_hi = 0;
982 sge->addr_lo = 0;
983}
984
985static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
986 struct bnx2x_fastpath *fp, int last)
987{
988 int i;
989
990 for (i = 0; i < last; i++)
991 bnx2x_free_rx_sge(bp, fp, i);
992}
993
994static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
995 struct bnx2x_fastpath *fp, u16 index)
996{
997 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
998 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
999 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1000 dma_addr_t mapping;
1001
1002 if (unlikely(page == NULL))
1003 return -ENOMEM;
1004
1005 mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
1006 PCI_DMA_FROMDEVICE);
1007 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1008 __free_pages(page, PAGES_PER_SGE_SHIFT);
1009 return -ENOMEM;
1010 }
1011
1012 sw_buf->page = page;
1013 pci_unmap_addr_set(sw_buf, mapping, mapping);
1014
1015 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1016 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1017
1018 return 0;
1019}
1020
1021static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1022 struct bnx2x_fastpath *fp, u16 index)
1023{
1024 struct sk_buff *skb;
1025 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1026 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1027 dma_addr_t mapping;
1028
1029 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1030 if (unlikely(skb == NULL))
1031 return -ENOMEM;
1032
1033 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
1034 PCI_DMA_FROMDEVICE);
1035 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1036 dev_kfree_skb(skb);
1037 return -ENOMEM;
1038 }
1039
1040 rx_buf->skb = skb;
1041 pci_unmap_addr_set(rx_buf, mapping, mapping);
1042
1043 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1044 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1045
1046 return 0;
1047}
1048
1049/* note that we are not allocating a new skb,
1050 * we are just moving one from cons to prod
1051 * we are not creating a new mapping,
1052 * so there is no need to check for dma_mapping_error().
1053 */
1054static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1055 struct sk_buff *skb, u16 cons, u16 prod)
1056{
1057 struct bnx2x *bp = fp->bp;
1058 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1059 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1060 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1061 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1062
1063 pci_dma_sync_single_for_device(bp->pdev,
1064 pci_unmap_addr(cons_rx_buf, mapping),
1065 bp->rx_offset + RX_COPY_THRESH,
1066 PCI_DMA_FROMDEVICE);
1067
1068 prod_rx_buf->skb = cons_rx_buf->skb;
1069 pci_unmap_addr_set(prod_rx_buf, mapping,
1070 pci_unmap_addr(cons_rx_buf, mapping));
1071 *prod_bd = *cons_bd;
1072}
1073
1074static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1075 u16 idx)
1076{
1077 u16 last_max = fp->last_max_sge;
1078
1079 if (SUB_S16(idx, last_max) > 0)
1080 fp->last_max_sge = idx;
1081}
1082
1083static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1084{
1085 int i, j;
1086
1087 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1088 int idx = RX_SGE_CNT * i - 1;
1089
1090 for (j = 0; j < 2; j++) {
1091 SGE_MASK_CLEAR_BIT(fp, idx);
1092 idx--;
1093 }
1094 }
1095}
1096
1097static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1098 struct eth_fast_path_rx_cqe *fp_cqe)
1099{
1100 struct bnx2x *bp = fp->bp;
1101 u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1102 le16_to_cpu(fp_cqe->len_on_bd)) >>
1103 BCM_PAGE_SHIFT;
1104 u16 last_max, last_elem, first_elem;
1105 u16 delta = 0;
1106 u16 i;
1107
1108 if (!sge_len)
1109 return;
1110
1111 /* First mark all used pages */
1112 for (i = 0; i < sge_len; i++)
1113 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1114
1115 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1116 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1117
1118 /* Here we assume that the last SGE index is the biggest */
1119 prefetch((void *)(fp->sge_mask));
1120 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1121
1122 last_max = RX_SGE(fp->last_max_sge);
1123 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1124 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1125
1126 /* If ring is not full */
1127 if (last_elem + 1 != first_elem)
1128 last_elem++;
1129
1130 /* Now update the prod */
1131 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1132 if (likely(fp->sge_mask[i]))
1133 break;
1134
1135 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1136 delta += RX_SGE_MASK_ELEM_SZ;
1137 }
1138
1139 if (delta > 0) {
1140 fp->rx_sge_prod += delta;
1141 /* clear page-end entries */
1142 bnx2x_clear_sge_mask_next_elems(fp);
1143 }
1144
1145 DP(NETIF_MSG_RX_STATUS,
1146 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1147 fp->last_max_sge, fp->rx_sge_prod);
1148}
1149
1150static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1151{
1152 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1153 memset(fp->sge_mask, 0xff,
1154 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1155
1156 /* Clear the two last indices in the page to 1:
1157 these are the indices that correspond to the "next" element,
1158 hence will never be indicated and should be removed from
1159 the calculations. */
1160 bnx2x_clear_sge_mask_next_elems(fp);
1161}
1162
1163static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1164 struct sk_buff *skb, u16 cons, u16 prod)
1165{
1166 struct bnx2x *bp = fp->bp;
1167 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1168 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1169 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1170 dma_addr_t mapping;
1171
1172 /* move empty skb from pool to prod and map it */
1173 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1174 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
1175 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1176 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1177
1178 /* move partial skb from cons to pool (don't unmap yet) */
1179 fp->tpa_pool[queue] = *cons_rx_buf;
1180
1181 /* mark bin state as start - print error if current state != stop */
1182 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1183 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1184
1185 fp->tpa_state[queue] = BNX2X_TPA_START;
1186
1187 /* point prod_bd to new skb */
1188 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1189 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1190
1191#ifdef BNX2X_STOP_ON_ERROR
1192 fp->tpa_queue_used |= (1 << queue);
1193#ifdef __powerpc64__
1194 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1195#else
1196 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1197#endif
1198 fp->tpa_queue_used);
1199#endif
1200}
1201
1202static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1203 struct sk_buff *skb,
1204 struct eth_fast_path_rx_cqe *fp_cqe,
1205 u16 cqe_idx)
1206{
1207 struct sw_rx_page *rx_pg, old_rx_pg;
1208 struct page *sge;
1209 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1210 u32 i, frag_len, frag_size, pages;
1211 int err;
1212 int j;
1213
1214 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1215 pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;
1216
1217 /* This is needed in order to enable forwarding support */
1218 if (frag_size)
1219 skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
1220 max(frag_size, (u32)len_on_bd));
1221
1222#ifdef BNX2X_STOP_ON_ERROR
1223 if (pages > 8*PAGES_PER_SGE) {
1224 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1225 pages, cqe_idx);
1226 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1227 fp_cqe->pkt_len, len_on_bd);
1228 bnx2x_panic();
1229 return -EINVAL;
1230 }
1231#endif
1232
1233 /* Run through the SGL and compose the fragmented skb */
1234 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1235 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1236
1237 /* FW gives the indices of the SGE as if the ring is an array
1238 (meaning that "next" element will consume 2 indices) */
1239 frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
1240 rx_pg = &fp->rx_page_ring[sge_idx];
1241 sge = rx_pg->page;
1242 old_rx_pg = *rx_pg;
1243
1244 /* If we fail to allocate a substitute page, we simply stop
1245 where we are and drop the whole packet */
1246 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1247 if (unlikely(err)) {
1248 bp->eth_stats.rx_skb_alloc_failed++;
1249 return err;
1250 }
1251
1252 /* Unmap the page as we are going to pass it to the stack */
1253 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
1254 BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1255
1256 /* Add one frag and update the appropriate fields in the skb */
1257 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1258
1259 skb->data_len += frag_len;
1260 skb->truesize += frag_len;
1261 skb->len += frag_len;
1262
1263 frag_size -= frag_len;
1264 }
1265
1266 return 0;
1267}
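/* Note the replace-then-consume order above: a substitute page is allocated
 * into the ring before the old page is handed to the stack as an skb
 * fragment, so a failed allocation drops the packet without ever leaving
 * the ring short of a mapped page. */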
1268
1269static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1270 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1271 u16 cqe_idx)
1272{
1273 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1274 struct sk_buff *skb = rx_buf->skb;
1275 /* alloc new skb */
1276 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1277
1278 /* Unmap skb in the pool anyway, as we are going to change
1279 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1280 fails. */
1281 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
1282 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
1283
1284 if (likely(new_skb)) {
1285 /* fix ip xsum and give it to the stack */
1286 /* (no need to map the new skb) */
1287
1288 prefetch(skb);
1289 prefetch(((char *)(skb)) + 128);
1290
1291#ifdef BNX2X_STOP_ON_ERROR
1292 if (pad + len > bp->rx_buf_size) {
1293 BNX2X_ERR("skb_put is about to fail... "
1294 "pad %d len %d rx_buf_size %d\n",
1295 pad, len, bp->rx_buf_size);
1296 bnx2x_panic();
1297 return;
1298 }
1299#endif
1300
1301 skb_reserve(skb, pad);
1302 skb_put(skb, len);
1303
1304 skb->protocol = eth_type_trans(skb, bp->dev);
1305 skb->ip_summed = CHECKSUM_UNNECESSARY;
1306
1307 {
1308 struct iphdr *iph;
1309
1310 iph = (struct iphdr *)skb->data;
1311 iph->check = 0;
1312 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1313 }
1314
1315 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1316 &cqe->fast_path_cqe, cqe_idx)) {
1317#ifdef BCM_VLAN
1318 if ((bp->vlgrp != NULL) &&
1319 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1320 PARSING_FLAGS_VLAN))
1321 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1322 le16_to_cpu(cqe->fast_path_cqe.
1323 vlan_tag));
1324 else
1325#endif
1326 netif_receive_skb(skb);
1327 } else {
1328 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1329 " - dropping packet!\n");
1330 dev_kfree_skb(skb);
1331 }
1332
1333
1334 /* put new skb in bin */
1335 fp->tpa_pool[queue].skb = new_skb;
1336
1337 } else {
1338 /* else drop the packet and keep the buffer in the bin */
1339 DP(NETIF_MSG_RX_STATUS,
1340 "Failed to allocate new skb - dropping packet!\n");
1341 bp->eth_stats.rx_skb_alloc_failed++;
1342 }
1343
1344 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1345}
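/* TPA stop path in short: the pool skb is always unmapped first, the IP
 * header checksum is recomputed before the aggregated skb is handed up
 * (see "fix ip xsum" above), and the skb reaches the stack only if the
 * SGL pages were successfully attached; any failure drops the packet, and
 * the pool slot keeps a buffer either way. */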
1346
1347static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1348 struct bnx2x_fastpath *fp,
1349 u16 bd_prod, u16 rx_comp_prod,
1350 u16 rx_sge_prod)
1351{
1352 struct tstorm_eth_rx_producers rx_prods = {0};
1353 int i;
1354
1355 /* Update producers */
1356 rx_prods.bd_prod = bd_prod;
1357 rx_prods.cqe_prod = rx_comp_prod;
1358 rx_prods.sge_prod = rx_sge_prod;
1359
1360 /*
1361 * Make sure that the BD and SGE data is updated before updating the
1362 * producers since FW might read the BD/SGE right after the producer
1363 * is updated.
1364 * This is only applicable for weak-ordered memory model archs such
1365 * as IA-64. The following barrier is also mandatory since FW will
1366 * assumes BDs must have buffers.
1367 */
1368 wmb();
1369
1370 for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
1371 REG_WR(bp, BAR_TSTRORM_INTMEM +
1372 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
1373 ((u32 *)&rx_prods)[i]);
1374
1375 mmiowb(); /* keep prod updates ordered */
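	/* mmiowb() keeps the producer writes above ordered against MMIO
	 * writes issued from other CPUs on weakly-ordered platforms; it
	 * does not replace the wmb() above, which orders the host-memory
	 * BD/SGE updates against the producer update itself. */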
1376
1377 DP(NETIF_MSG_RX_STATUS,
1378 "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
1379 bd_prod, rx_comp_prod, rx_sge_prod);
1380}
1381
1382static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1383{
1384 struct bnx2x *bp = fp->bp;
1385 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1386 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1387 int rx_pkt = 0;
1388
1389#ifdef BNX2X_STOP_ON_ERROR
1390 if (unlikely(bp->panic))
1391 return 0;
1392#endif
1393
1394 /* CQ "next element" is of the size of the regular element,
1395 that's why it's ok here */
1396 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1397 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1398 hw_comp_cons++;
1399
1400 bd_cons = fp->rx_bd_cons;
1401 bd_prod = fp->rx_bd_prod;
1402 bd_prod_fw = bd_prod;
1403 sw_comp_cons = fp->rx_comp_cons;
1404 sw_comp_prod = fp->rx_comp_prod;
1405
1406 /* Memory barrier necessary as speculative reads of the rx
1407 * buffer can be ahead of the index in the status block
1408 */
1409 rmb();
1410
1411 DP(NETIF_MSG_RX_STATUS,
1412 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1413 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
1414
1415 while (sw_comp_cons != hw_comp_cons) {
1416 struct sw_rx_bd *rx_buf = NULL;
1417 struct sk_buff *skb;
1418 union eth_rx_cqe *cqe;
1419 u8 cqe_fp_flags;
1420 u16 len, pad;
1421
1422 comp_ring_cons = RCQ_BD(sw_comp_cons);
1423 bd_prod = RX_BD(bd_prod);
1424 bd_cons = RX_BD(bd_cons);
1425
1426 cqe = &fp->rx_comp_ring[comp_ring_cons];
1427 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1428
1429 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1430 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1431 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1432 cqe->fast_path_cqe.rss_hash_result,
1433 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1434 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1435
1436 /* is this a slowpath msg? */
1437 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1438 bnx2x_sp_event(fp, cqe);
1439 goto next_cqe;
1440
1441 /* this is an rx packet */
1442 } else {
1443 rx_buf = &fp->rx_buf_ring[bd_cons];
1444 skb = rx_buf->skb;
1445 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1446 pad = cqe->fast_path_cqe.placement_offset;
1447
1448 /* If CQE is marked both TPA_START and TPA_END
1449 it is a non-TPA CQE */
1450 if ((!fp->disable_tpa) &&
1451 (TPA_TYPE(cqe_fp_flags) !=
1452 (TPA_TYPE_START | TPA_TYPE_END))) {
1453 u16 queue = cqe->fast_path_cqe.queue_index;
1454
1455 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1456 DP(NETIF_MSG_RX_STATUS,
1457 "calling tpa_start on queue %d\n",
1458 queue);
1459
1460 bnx2x_tpa_start(fp, queue, skb,
1461 bd_cons, bd_prod);
1462 goto next_rx;
1463 }
1464
1465 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1466 DP(NETIF_MSG_RX_STATUS,
1467 "calling tpa_stop on queue %d\n",
1468 queue);
1469
1470 if (!BNX2X_RX_SUM_FIX(cqe))
1471 BNX2X_ERR("STOP on none TCP "
1472 "data\n");
1473
1474 /* This is a size of the linear data
1475 on this skb */
1476 len = le16_to_cpu(cqe->fast_path_cqe.
1477 len_on_bd);
1478 bnx2x_tpa_stop(bp, fp, queue, pad,
1479 len, cqe, comp_ring_cons);
1480#ifdef BNX2X_STOP_ON_ERROR
1481 if (bp->panic)
1482 return -EINVAL;
1483#endif
1484
1485 bnx2x_update_sge_prod(fp,
1486 &cqe->fast_path_cqe);
1487 goto next_cqe;
1488 }
1489 }
1490
1491 pci_dma_sync_single_for_device(bp->pdev,
1492 pci_unmap_addr(rx_buf, mapping),
1493 pad + RX_COPY_THRESH,
1494 PCI_DMA_FROMDEVICE);
1495 prefetch(skb);
1496 prefetch(((char *)(skb)) + 128);
1497
1498 /* is this an error packet? */
1499 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1500 DP(NETIF_MSG_RX_ERR,
1501 "ERROR flags %x rx packet %u\n",
1502 cqe_fp_flags, sw_comp_cons);
1503 bp->eth_stats.rx_err_discard_pkt++;
1504 goto reuse_rx;
1505 }
1506
1507 /* Since we don't have a jumbo ring
1508 * copy small packets if mtu > 1500
1509 */
1510 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1511 (len <= RX_COPY_THRESH)) {
1512 struct sk_buff *new_skb;
1513
1514 new_skb = netdev_alloc_skb(bp->dev,
1515 len + pad);
1516 if (new_skb == NULL) {
1517 DP(NETIF_MSG_RX_ERR,
1518 "ERROR packet dropped "
1519 "because of alloc failure\n");
1520 bp->eth_stats.rx_skb_alloc_failed++;
1521 goto reuse_rx;
1522 }
1523
1524 /* aligned copy */
1525 skb_copy_from_linear_data_offset(skb, pad,
1526 new_skb->data + pad, len);
1527 skb_reserve(new_skb, pad);
1528 skb_put(new_skb, len);
1529
1530 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1531
1532 skb = new_skb;
1533
1534 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1535 pci_unmap_single(bp->pdev,
1536 pci_unmap_addr(rx_buf, mapping),
1537 bp->rx_buf_size,
1538 PCI_DMA_FROMDEVICE);
1539 skb_reserve(skb, pad);
1540 skb_put(skb, len);
1541
1542 } else {
1543 DP(NETIF_MSG_RX_ERR,
1544 "ERROR packet dropped because "
1545 "of alloc failure\n");
1546 bp->eth_stats.rx_skb_alloc_failed++;
1547reuse_rx:
1548 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1549 goto next_rx;
1550 }
1551
1552 skb->protocol = eth_type_trans(skb, bp->dev);
1553
1554 skb->ip_summed = CHECKSUM_NONE;
1555 if (bp->rx_csum) {
1556 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1557 skb->ip_summed = CHECKSUM_UNNECESSARY;
1558 else
1559 bp->eth_stats.hw_csum_err++;
1560 }
1561 }
1562
1563#ifdef BCM_VLAN
1564 if ((bp->vlgrp != NULL) &&
1565 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1566 PARSING_FLAGS_VLAN))
1567 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1568 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1569 else
1570#endif
1571 netif_receive_skb(skb);
1572
1573
1574next_rx:
1575 rx_buf->skb = NULL;
1576
1577 bd_cons = NEXT_RX_IDX(bd_cons);
1578 bd_prod = NEXT_RX_IDX(bd_prod);
1579 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1580 rx_pkt++;
1581next_cqe:
1582 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1583 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1584
1585 if (rx_pkt == budget)
1586 break;
1587 } /* while */
1588
1589 fp->rx_bd_cons = bd_cons;
1590 fp->rx_bd_prod = bd_prod_fw;
1591 fp->rx_comp_cons = sw_comp_cons;
1592 fp->rx_comp_prod = sw_comp_prod;
1593
1594 /* Update producers */
1595 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1596 fp->rx_sge_prod);
1597
1598 fp->rx_pkt += rx_pkt;
1599 fp->rx_calls++;
1600
1601 return rx_pkt;
1602}
1603
1604static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1605{
1606 struct bnx2x_fastpath *fp = fp_cookie;
1607 struct bnx2x *bp = fp->bp;
1608 int index = FP_IDX(fp);
1609
1610 /* Return here if interrupt is disabled */
1611 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1612 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1613 return IRQ_HANDLED;
1614 }
1615
1616 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1617 index, FP_SB_ID(fp));
1618 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
1619
1620#ifdef BNX2X_STOP_ON_ERROR
1621 if (unlikely(bp->panic))
1622 return IRQ_HANDLED;
1623#endif
1624
1625 prefetch(fp->rx_cons_sb);
1626 prefetch(fp->tx_cons_sb);
1627 prefetch(&fp->status_blk->c_status_block.status_block_index);
1628 prefetch(&fp->status_blk->u_status_block.status_block_index);
1629
1630 netif_rx_schedule(&bnx2x_fp(bp, index, napi));
1631
1632 return IRQ_HANDLED;
1633}
1634
1635static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1636{
1637 struct net_device *dev = dev_instance;
1638 struct bnx2x *bp = netdev_priv(dev);
1639 u16 status = bnx2x_ack_int(bp);
1640 u16 mask;
1641
1642 /* Return here if interrupt is shared and it's not for us */
1643 if (unlikely(status == 0)) {
1644 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1645 return IRQ_NONE;
1646 }
1647 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
1648
1649 /* Return here if interrupt is disabled */
1650 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1651 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1652 return IRQ_HANDLED;
1653 }
1654
1655#ifdef BNX2X_STOP_ON_ERROR
1656 if (unlikely(bp->panic))
1657 return IRQ_HANDLED;
1658#endif
1659
1660 mask = 0x2 << bp->fp[0].sb_id;
1661 if (status & mask) {
1662 struct bnx2x_fastpath *fp = &bp->fp[0];
1663
1664 prefetch(fp->rx_cons_sb);
1665 prefetch(fp->tx_cons_sb);
1666 prefetch(&fp->status_blk->c_status_block.status_block_index);
1667 prefetch(&fp->status_blk->u_status_block.status_block_index);
1668
1669 netif_rx_schedule(&bnx2x_fp(bp, 0, napi));
1670
1671 status &= ~mask;
1672 }
1673
1674
1675 if (unlikely(status & 0x1)) {
1676 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1677
1678 status &= ~0x1;
1679 if (!status)
1680 return IRQ_HANDLED;
1681 }
1682
1683 if (status)
1684 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1685 status);
1686
1687 return IRQ_HANDLED;
1688}
1689
1690/* end of fast path */
1691
1692static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1693
1694/* Link */
1695
1696/*
1697 * General service functions
1698 */
1699
1700static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1701{
1702 u32 lock_status;
1703 u32 resource_bit = (1 << resource);
1704 int func = BP_FUNC(bp);
1705 u32 hw_lock_control_reg;
1706 int cnt;
1707
1708 /* Validating that the resource is within range */
1709 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1710 DP(NETIF_MSG_HW,
1711 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1712 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1713 return -EINVAL;
1714 }
1715
1716 if (func <= 5) {
1717 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1718 } else {
1719 hw_lock_control_reg =
1720 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1721 }
1722
1723 /* Validating that the resource is not already taken */
1724 lock_status = REG_RD(bp, hw_lock_control_reg);
1725 if (lock_status & resource_bit) {
1726 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1727 lock_status, resource_bit);
1728 return -EEXIST;
1729 }
1730
1731 /* Try for 5 second every 5ms */
1732 for (cnt = 0; cnt < 1000; cnt++) {
1733 /* Try to acquire the lock */
1734 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1735 lock_status = REG_RD(bp, hw_lock_control_reg);
1736 if (lock_status & resource_bit)
1737 return 0;
1738
1739 msleep(5);
1740 }
1741 DP(NETIF_MSG_HW, "Timeout\n");
1742 return -EAGAIN;
1743}
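/* Lock request protocol: writing the resource bit to hw_lock_control_reg+4
 * requests the lock from the hardware arbiter, and the read-back of the
 * base register confirms whether it was granted; 1000 polls at 5 ms each
 * give the 5 second budget mentioned above. */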
1744
1745static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1746{
1747 u32 lock_status;
1748 u32 resource_bit = (1 << resource);
1749 int func = BP_FUNC(bp);
1750 u32 hw_lock_control_reg;
1751
1752 /* Validating that the resource is within range */
1753 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1754 DP(NETIF_MSG_HW,
1755 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1756 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1757 return -EINVAL;
1758 }
1759
1760 if (func <= 5) {
1761 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1762 } else {
1763 hw_lock_control_reg =
1764 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1765 }
1766
1767 /* Validating that the resource is currently taken */
1768 lock_status = REG_RD(bp, hw_lock_control_reg);
1769 if (!(lock_status & resource_bit)) {
1770 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1771 lock_status, resource_bit);
1772 return -EFAULT;
1773 }
1774
1775 REG_WR(bp, hw_lock_control_reg, resource_bit);
1776 return 0;
1777}
1778
1779/* HW Lock for shared dual port PHYs */
1780static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1781{
1782 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
1783
1784 mutex_lock(&bp->port.phy_mutex);
1785
1786 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1787 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1788 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1789}
a2fbb9ea 1790
4a37fb66 1791static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1792{
1793 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1794
c18487ee
YR
1795 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1796 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1797 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1798
34f80b04 1799 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1800}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
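
/* Worked example (illustrative): for gpio_num 1 on port 1 with the swap
 * register active, gpio_port becomes 1 ^ 1 = 0, so gpio_shift stays 1 and
 * gpio_mask is 0x2; OUTPUT_LOW then clears bit 1 of the FLOAT field and
 * sets bit 1 of the CLR field in a single read-modify-write of
 * MISC_REG_GPIO, with the field positions given by the
 * MISC_REGISTERS_GPIO_*_POS defines.
 */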

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
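
/* Note (illustrative): this folds the negotiated ieee_fc value back into
 * the ethtool advertising mask - PAUSE_BOTH advertises both
 * ADVERTISED_Pause and ADVERTISED_Asym_Pause, the asymmetric case
 * advertises ADVERTISED_Asym_Pause alone, and anything else clears both.
 */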

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
     In the latter case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will
     be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
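
/* Worked example (illustrative): with configured min rates of 0%, 25%
 * and 75% on the visible vns, the percentages scale to 0, 2500 and 7500;
 * the zero entry is bumped to DEF_MIN_RATE, giving
 * wsum = DEF_MIN_RATE + 2500 + 7500 and fairness stays enabled.  Only
 * when every visible vn is configured to 0 does this return 0.
 */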

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will
		   occur.  The 1.25 coefficient makes the threshold a little
		   bigger than the real time, to compensate for timer
		   inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			  ((double)m_rs_vn.
				   protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
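
/* Worked example (illustrative): for vn_min_rate 2500 out of wsum 10000,
 * vn_credit_delta = max(2500 * (T_FAIR_COEF / 80000),
 * 2 * fair_threshold), i.e. a quarter of the total per-period fair
 * credit T_FAIR_COEF / 8, floored at two thresholds so a vn's credit
 * period never drops below the timer-arming threshold.
 */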

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
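
/* Illustrative sketch (not part of the driver): posting a slow path
 * ramrod, exactly as the statistics code below does; data_hi/data_lo
 * carry the ramrod payload and 'common' marks non-connection commands.
 */
#if 0
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			   ((u32 *)&ramrod_data)[1],
			   ((u32 *)&ramrod_data)[0], 0);
#endif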

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
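
/* Note (illustrative): the barrier() above is the kind of compiler
 * barrier this patch's title refers to - the indices live in a status
 * block that the chip updates by DMA, so each status_block_index must be
 * re-read from memory rather than satisfied from a value the compiler
 * cached before the interrupt fired.
 */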

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
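
/* Worked example (illustrative): with attn_bits 0x4, attn_ack 0x1 and
 * attn_state 0x1, asserted = 0x4 & ~0x1 & ~0x1 = 0x4 (bit 2 newly
 * raised) and deasserted = ~0x4 & 0x1 & 0x1 = 0x1 (bit 0 went away), so
 * both handlers run in the same invocation.
 */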

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
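
/* Worked example (illustrative): DIFF_64 of 0x1:00000000 minus
 * 0x0:00000001 takes the m_lo < s_lo branch - d_hi = 1 is decremented to
 * 0 ('loaning' 1 to the low word) and d_lo = 0 + (UINT_MAX - 1) + 1 =
 * 0xffffffff; ADD_64 mirrors this with a carry when s_lo wraps.  Note the
 * carry term in ADD_64 needs its own parentheses, since '+' binds tighter
 * than '?:' in C.
 */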

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
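
/* Note (illustrative): hiref points at a {hi, lo} pair of u32s.  On
 * 64-bit builds this returns the combined HILO_U64(hi, lo) value; on
 * 32-bit builds the high word is dropped, so values read through
 * bnx2x_hilo() can wrap at 4G there.
 */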

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
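
/* Note (illustrative): DMAE completion is signalled by the block writing
 * DMAE_COMP_VAL into the stats_comp word in host memory (see the comp_addr
 * and comp_val fields in the commands built below); this helper just polls
 * that word, giving up after roughly 10 msleep(1) iterations.
 */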

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3451
3452static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3453{
3454 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3455 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3456 struct regpair diff;
3457
3458 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3459 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3460 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3461 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3462 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3463 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3464 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3465 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3466 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3467 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3468 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3469 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3470 UPDATE_STAT64(tx_stat_gt127,
3471 tx_stat_etherstatspkts65octetsto127octets);
3472 UPDATE_STAT64(tx_stat_gt255,
3473 tx_stat_etherstatspkts128octetsto255octets);
3474 UPDATE_STAT64(tx_stat_gt511,
3475 tx_stat_etherstatspkts256octetsto511octets);
3476 UPDATE_STAT64(tx_stat_gt1023,
3477 tx_stat_etherstatspkts512octetsto1023octets);
3478 UPDATE_STAT64(tx_stat_gt1518,
3479 tx_stat_etherstatspkts1024octetsto1522octets);
3480 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3481 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3482 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3483 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3484 UPDATE_STAT64(tx_stat_gterr,
3485 tx_stat_dot3statsinternalmactransmiterrors);
3486 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3487}
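/* The UPDATE_STAT64 macro is assumed here to take the
 * difference between the freshly DMAE-read BigMAC counter and
 * its previous snapshot and accumulate it into the 64-bit
 * hi/lo host counter, so wraps of the narrower hardware
 * counters are absorbed between updates.
 */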
3488
3489static void bnx2x_emac_stats_update(struct bnx2x *bp)
3490{
3491 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3492 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3493
3494 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3495 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3496 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3497 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3498 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3499 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3500 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3501 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3502 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3503 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3504 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3505 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3506 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3507 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3508 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3509 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3510 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3512 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3513 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3514 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3515 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3516 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3517 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3518 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3519 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3520 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3521 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3522 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3523 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3524 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3525}
3526
3527static int bnx2x_hw_stats_update(struct bnx2x *bp)
3528{
3529 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3530 struct nig_stats *old = &(bp->port.old_nig_stats);
3531 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3532 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3533 struct regpair diff;
3534
3535 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3536 bnx2x_bmac_stats_update(bp);
3537
3538 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3539 bnx2x_emac_stats_update(bp);
3540
3541 else { /* unreached */
3542 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3543 return -1;
3544 }
a2fbb9ea 3545
bb2a0f7a
YG
3546 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3547 new->brb_discard - old->brb_discard);
66e855f3
YG
3548 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3549 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3550
bb2a0f7a
YG
3551 UPDATE_STAT64_NIG(egress_mac_pkt0,
3552 etherstatspkts1024octetsto1522octets);
3553 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3554
bb2a0f7a 3555 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3556
bb2a0f7a
YG
3557 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3558 sizeof(struct mac_stx));
3559 estats->brb_drop_hi = pstats->brb_drop_hi;
3560 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3561
bb2a0f7a 3562 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3563
bb2a0f7a 3564 return 0;
a2fbb9ea
ET
3565}
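/* host_port_stats_start/end appear to form a sequence pair:
 * end is bumped and copied into start only once the block is
 * consistent, so a consumer seeing start == end knows the
 * snapshot was not torn by a concurrent DMAE update.
 */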
3566
bb2a0f7a 3567static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3568{
3569 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3570 int cl_id = BP_CL_ID(bp);
3571 struct tstorm_per_port_stats *tport =
3572 &stats->tstorm_common.port_statistics;
a2fbb9ea 3573 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3574 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3575 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3576 struct xstorm_per_client_stats *xclient =
3577 &stats->xstorm_common.client_statistics[cl_id];
3578 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3579 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3580 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3581 u32 diff;
3582
bb2a0f7a
YG
3583 /* are storm stats valid? */
3584 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3585 bp->stats_counter) {
3586 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3587 " tstorm counter (%d) != stats_counter (%d)\n",
3588 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3589 return -1;
3590 }
bb2a0f7a
YG
3591 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3592 bp->stats_counter) {
3593 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3594 " xstorm counter (%d) != stats_counter (%d)\n",
3595 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3596 return -2;
3597 }
a2fbb9ea 3598
bb2a0f7a
YG
3599 fstats->total_bytes_received_hi =
3600 fstats->valid_bytes_received_hi =
a2fbb9ea 3601 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3602 fstats->total_bytes_received_lo =
3603 fstats->valid_bytes_received_lo =
a2fbb9ea 3604 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3605
3606 estats->error_bytes_received_hi =
3607 le32_to_cpu(tclient->rcv_error_bytes.hi);
3608 estats->error_bytes_received_lo =
3609 le32_to_cpu(tclient->rcv_error_bytes.lo);
3610 ADD_64(estats->error_bytes_received_hi,
3611 estats->rx_stat_ifhcinbadoctets_hi,
3612 estats->error_bytes_received_lo,
3613 estats->rx_stat_ifhcinbadoctets_lo);
3614
3615 ADD_64(fstats->total_bytes_received_hi,
3616 estats->error_bytes_received_hi,
3617 fstats->total_bytes_received_lo,
3618 estats->error_bytes_received_lo);
3619
3620 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3621 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3622 total_multicast_packets_received);
a2fbb9ea 3623 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3624 total_broadcast_packets_received);
3625
3626 fstats->total_bytes_transmitted_hi =
3627 le32_to_cpu(xclient->total_sent_bytes.hi);
3628 fstats->total_bytes_transmitted_lo =
3629 le32_to_cpu(xclient->total_sent_bytes.lo);
3630
3631 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3632 total_unicast_packets_transmitted);
3633 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3634 total_multicast_packets_transmitted);
3635 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3636 total_broadcast_packets_transmitted);
3637
3638 memcpy(estats, &(fstats->total_bytes_received_hi),
3639 sizeof(struct host_func_stats) - 2*sizeof(u32));
3640
3641 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3642 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3643 estats->brb_truncate_discard =
3644 le32_to_cpu(tport->brb_truncate_discard);
3645 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3646
3647 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3648 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3649 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3650 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3651 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3652 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3653 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3654 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3655 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3656 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3657 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3658 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3659 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3660
bb2a0f7a
YG
3661 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3662 old_tclient->packets_too_big_discard =
a2fbb9ea 3663 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3664 estats->no_buff_discard =
3665 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3666 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3667
3668 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3669 old_xclient->unicast_bytes_sent.hi =
3670 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3671 old_xclient->unicast_bytes_sent.lo =
3672 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3673 old_xclient->multicast_bytes_sent.hi =
3674 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3675 old_xclient->multicast_bytes_sent.lo =
3676 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3677 old_xclient->broadcast_bytes_sent.hi =
3678 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3679 old_xclient->broadcast_bytes_sent.lo =
3680 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3681
3682 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3683
3684 return 0;
3685}
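/* The (stats_counter + 1) checks at the top guard against
 * consuming a storm statistics buffer the firmware has not yet
 * refreshed for the current query; on a stale buffer the
 * function returns early without touching the host stats.
 */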
3686
bb2a0f7a 3687static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3688{
bb2a0f7a
YG
3689 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3690 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3691 struct net_device_stats *nstats = &bp->dev->stats;
3692
3693 nstats->rx_packets =
3694 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3695 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3696 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3697
3698 nstats->tx_packets =
3699 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3700 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3701 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3702
bb2a0f7a 3703 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3704
0e39e645 3705 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3706
bb2a0f7a
YG
3707 nstats->rx_dropped = old_tclient->checksum_discard +
3708 estats->mac_discard;
a2fbb9ea
ET
3709 nstats->tx_dropped = 0;
3710
3711 nstats->multicast =
3712 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3713
bb2a0f7a
YG
3714 nstats->collisions =
3715 estats->tx_stat_dot3statssinglecollisionframes_lo +
3716 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3717 estats->tx_stat_dot3statslatecollisions_lo +
3718 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3719
bb2a0f7a
YG
3720 estats->jabber_packets_received =
3721 old_tclient->packets_too_big_discard +
3722 estats->rx_stat_dot3statsframestoolong_lo;
3723
3724 nstats->rx_length_errors =
3725 estats->rx_stat_etherstatsundersizepkts_lo +
3726 estats->jabber_packets_received;
66e855f3 3727 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3728 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3729 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3730 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3731 nstats->rx_missed_errors = estats->xxoverflow_discard;
3732
3733 nstats->rx_errors = nstats->rx_length_errors +
3734 nstats->rx_over_errors +
3735 nstats->rx_crc_errors +
3736 nstats->rx_frame_errors +
0e39e645
ET
3737 nstats->rx_fifo_errors +
3738 nstats->rx_missed_errors;
a2fbb9ea 3739
bb2a0f7a
YG
3740 nstats->tx_aborted_errors =
3741 estats->tx_stat_dot3statslatecollisions_lo +
3742 estats->tx_stat_dot3statsexcessivecollisions_lo;
3743 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3744 nstats->tx_fifo_errors = 0;
3745 nstats->tx_heartbeat_errors = 0;
3746 nstats->tx_window_errors = 0;
3747
3748 nstats->tx_errors = nstats->tx_aborted_errors +
3749 nstats->tx_carrier_errors;
a2fbb9ea
ET
3750}
3751
bb2a0f7a 3752static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3753{
bb2a0f7a
YG
3754 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3755 int update = 0;
a2fbb9ea 3756
bb2a0f7a
YG
3757 if (*stats_comp != DMAE_COMP_VAL)
3758 return;
3759
3760 if (bp->port.pmf)
3761 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3762
bb2a0f7a 3763 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3764
bb2a0f7a
YG
3765 if (update)
3766 bnx2x_net_stats_update(bp);
a2fbb9ea 3767
bb2a0f7a
YG
3768 else {
3769 if (bp->stats_pending) {
3770 bp->stats_pending++;
3771 if (bp->stats_pending == 3) {
3772 BNX2X_ERR("stats not updated for 3 times\n");
3773 bnx2x_panic();
3774 return;
3775 }
3776 }
a2fbb9ea
ET
3777 }
3778
3779 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3780 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3781 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3782 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3783 int i;
a2fbb9ea
ET
3784
3785 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3786 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3787 " tx pkt (%lx)\n",
3788 bnx2x_tx_avail(bp->fp),
7a9b2557 3789 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3790 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3791 " rx pkt (%lx)\n",
7a9b2557
VZ
3792 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3793 bp->fp->rx_comp_cons),
3794 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3795 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3796 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3797 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3798 printk(KERN_DEBUG "tstats: checksum_discard %u "
3799 "packets_too_big_discard %u no_buff_discard %u "
3800 "mac_discard %u mac_filter_discard %u "
3801 "xxovrflow_discard %u brb_truncate_discard %u "
3802 "ttl0_discard %u\n",
bb2a0f7a
YG
3803 old_tclient->checksum_discard,
3804 old_tclient->packets_too_big_discard,
3805 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3806 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3807 estats->brb_truncate_discard,
3808 old_tclient->ttl0_discard);
a2fbb9ea
ET
3809
3810 for_each_queue(bp, i) {
3811 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3812 bnx2x_fp(bp, i, tx_pkt),
3813 bnx2x_fp(bp, i, rx_pkt),
3814 bnx2x_fp(bp, i, rx_calls));
3815 }
3816 }
3817
bb2a0f7a
YG
3818 bnx2x_hw_stats_post(bp);
3819 bnx2x_storm_stats_post(bp);
3820}
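/* Update flow: bail out unless the previous DMAE completed
 * (*stats_comp == DMAE_COMP_VAL), fold in HW and storm stats,
 * and panic if the storm counters stay stale for three timer
 * ticks in a row. Fresh HW/storm queries are posted at the end
 * so the next tick has new data to consume.
 */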
a2fbb9ea 3821
bb2a0f7a
YG
3822static void bnx2x_port_stats_stop(struct bnx2x *bp)
3823{
3824 struct dmae_command *dmae;
3825 u32 opcode;
3826 int loader_idx = PMF_DMAE_C(bp);
3827 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3828
bb2a0f7a 3829 bp->executer_idx = 0;
a2fbb9ea 3830
bb2a0f7a
YG
3831 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3832 DMAE_CMD_C_ENABLE |
3833 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3834#ifdef __BIG_ENDIAN
bb2a0f7a 3835 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3836#else
bb2a0f7a 3837 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3838#endif
bb2a0f7a
YG
3839 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3840 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3841
3842 if (bp->port.port_stx) {
3843
3844 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3845 if (bp->func_stx)
3846 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3847 else
3848 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3849 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3850 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3851 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3852 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3853 dmae->len = sizeof(struct host_port_stats) >> 2;
3854 if (bp->func_stx) {
3855 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3856 dmae->comp_addr_hi = 0;
3857 dmae->comp_val = 1;
3858 } else {
3859 dmae->comp_addr_lo =
3860 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3861 dmae->comp_addr_hi =
3862 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3863 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3864
bb2a0f7a
YG
3865 *stats_comp = 0;
3866 }
a2fbb9ea
ET
3867 }
3868
bb2a0f7a
YG
3869 if (bp->func_stx) {
3870
3871 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3872 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3873 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3874 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3875 dmae->dst_addr_lo = bp->func_stx >> 2;
3876 dmae->dst_addr_hi = 0;
3877 dmae->len = sizeof(struct host_func_stats) >> 2;
3878 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3879 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3880 dmae->comp_val = DMAE_COMP_VAL;
3881
3882 *stats_comp = 0;
a2fbb9ea 3883 }
bb2a0f7a
YG
3884}
3885
3886static void bnx2x_stats_stop(struct bnx2x *bp)
3887{
3888 int update = 0;
3889
3890 bnx2x_stats_comp(bp);
3891
3892 if (bp->port.pmf)
3893 update = (bnx2x_hw_stats_update(bp) == 0);
3894
3895 update |= (bnx2x_storm_stats_update(bp) == 0);
3896
3897 if (update) {
3898 bnx2x_net_stats_update(bp);
a2fbb9ea 3899
bb2a0f7a
YG
3900 if (bp->port.pmf)
3901 bnx2x_port_stats_stop(bp);
3902
3903 bnx2x_hw_stats_post(bp);
3904 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3905 }
3906}
3907
bb2a0f7a
YG
3908static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3909{
3910}
3911
3912static const struct {
3913 void (*action)(struct bnx2x *bp);
3914 enum bnx2x_stats_state next_state;
3915} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3916/* state event */
3917{
3918/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3919/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3920/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3921/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3922},
3923{
3924/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3925/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3926/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3927/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3928}
3929};
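/* State machine layout: the outer index is the current state
 * (DISABLED/ENABLED), the inner index the event (PMF change,
 * link up, timer update, stop); each cell names the action to
 * run and the state to enter next.
 */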
3930
3931static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3932{
3933 enum bnx2x_stats_state state = bp->stats_state;
3934
3935 bnx2x_stats_stm[state][event].action(bp);
3936 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3937
3938 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3939 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3940 state, event, bp->stats_state);
3941}
3942
a2fbb9ea
ET
3943static void bnx2x_timer(unsigned long data)
3944{
3945 struct bnx2x *bp = (struct bnx2x *) data;
3946
3947 if (!netif_running(bp->dev))
3948 return;
3949
3950 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3951 goto timer_restart;
a2fbb9ea
ET
3952
3953 if (poll) {
3954 struct bnx2x_fastpath *fp = &bp->fp[0];
3955 int rc;
3956
3957 bnx2x_tx_int(fp, 1000);
3958 rc = bnx2x_rx_int(fp, 1000);
3959 }
3960
34f80b04
EG
3961 if (!BP_NOMCP(bp)) {
3962 int func = BP_FUNC(bp);
a2fbb9ea
ET
3963 u32 drv_pulse;
3964 u32 mcp_pulse;
3965
3966 ++bp->fw_drv_pulse_wr_seq;
3967 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3968 /* TBD - add SYSTEM_TIME */
3969 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3970 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3971
34f80b04 3972 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3973 MCP_PULSE_SEQ_MASK);
3974 /* The delta between driver pulse and mcp response
3975 * should be 1 (before mcp response) or 0 (after mcp response)
3976 */
3977 if ((drv_pulse != mcp_pulse) &&
3978 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3979 /* someone lost a heartbeat... */
3980 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3981 drv_pulse, mcp_pulse);
3982 }
3983 }
3984
bb2a0f7a
YG
3985 if ((bp->state == BNX2X_STATE_OPEN) ||
3986 (bp->state == BNX2X_STATE_DISABLED))
3987 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3988
f1410647 3989timer_restart:
a2fbb9ea
ET
3990 mod_timer(&bp->timer, jiffies + bp->current_interval);
3991}
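/* The pulse exchange is a driver<->MCP heartbeat: the driver
 * advances drv_pulse in shared memory on every timer tick and
 * expects the management firmware's mcp_pulse to trail by at
 * most one sequence number; any other delta means a lost
 * heartbeat on one of the two sides.
 */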
3992
3993/* end of Statistics */
3994
3995/* nic init */
3996
3997/*
3998 * nic init service functions
3999 */
4000
34f80b04 4001static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4002{
34f80b04
EG
4003 int port = BP_PORT(bp);
4004
4005 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4006 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4007 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4008 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4009 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4010 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4011}
4012
5c862848
EG
4013static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4014 dma_addr_t mapping, int sb_id)
34f80b04
EG
4015{
4016 int port = BP_PORT(bp);
bb2a0f7a 4017 int func = BP_FUNC(bp);
a2fbb9ea 4018 int index;
34f80b04 4019 u64 section;
a2fbb9ea
ET
4020
4021 /* USTORM */
4022 section = ((u64)mapping) + offsetof(struct host_status_block,
4023 u_status_block);
34f80b04 4024 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4025
4026 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4027 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4028 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4029 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4030 U64_HI(section));
bb2a0f7a
YG
4031 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4032 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4033
4034 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4035 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4036 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4037
4038 /* CSTORM */
4039 section = ((u64)mapping) + offsetof(struct host_status_block,
4040 c_status_block);
34f80b04 4041 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4042
4043 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4044 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4045 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4046 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4047 U64_HI(section));
7a9b2557
VZ
4048 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4049 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4050
4051 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4052 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4053 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4054
4055 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4056}
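/* A fastpath status block has USTORM and CSTORM halves; each
 * half gets its host DMA address and owning function written
 * into the storm's internal memory, and all of its HC indices
 * appear to start out disabled until bnx2x_update_coalesce()
 * programs the timeouts.
 */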
4057
4058static void bnx2x_zero_def_sb(struct bnx2x *bp)
4059{
4060 int func = BP_FUNC(bp);
a2fbb9ea 4061
34f80b04
EG
4062 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4063 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4064 sizeof(struct ustorm_def_status_block)/4);
4065 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4066 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4067 sizeof(struct cstorm_def_status_block)/4);
4068 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4069 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4070 sizeof(struct xstorm_def_status_block)/4);
4071 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4072 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4073 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4074}
4075
4076static void bnx2x_init_def_sb(struct bnx2x *bp,
4077 struct host_def_status_block *def_sb,
34f80b04 4078 dma_addr_t mapping, int sb_id)
a2fbb9ea 4079{
34f80b04
EG
4080 int port = BP_PORT(bp);
4081 int func = BP_FUNC(bp);
a2fbb9ea
ET
4082 int index, val, reg_offset;
4083 u64 section;
4084
4085 /* ATTN */
4086 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4087 atten_status_block);
34f80b04 4088 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4089
49d66772
ET
4090 bp->attn_state = 0;
4091
a2fbb9ea
ET
4092 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4093 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4094
34f80b04 4095 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4096 bp->attn_group[index].sig[0] = REG_RD(bp,
4097 reg_offset + 0x10*index);
4098 bp->attn_group[index].sig[1] = REG_RD(bp,
4099 reg_offset + 0x4 + 0x10*index);
4100 bp->attn_group[index].sig[2] = REG_RD(bp,
4101 reg_offset + 0x8 + 0x10*index);
4102 bp->attn_group[index].sig[3] = REG_RD(bp,
4103 reg_offset + 0xc + 0x10*index);
4104 }
4105
a2fbb9ea
ET
4106 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4107 HC_REG_ATTN_MSG0_ADDR_L);
4108
4109 REG_WR(bp, reg_offset, U64_LO(section));
4110 REG_WR(bp, reg_offset + 4, U64_HI(section));
4111
4112 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4113
4114 val = REG_RD(bp, reg_offset);
34f80b04 4115 val |= sb_id;
a2fbb9ea
ET
4116 REG_WR(bp, reg_offset, val);
4117
4118 /* USTORM */
4119 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4120 u_def_status_block);
34f80b04 4121 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4122
4123 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4124 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4125 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4126 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4127 U64_HI(section));
5c862848 4128 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4129 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4130
4131 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4132 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4133 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4134
4135 /* CSTORM */
4136 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4137 c_def_status_block);
34f80b04 4138 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4139
4140 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4141 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4142 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4143 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4144 U64_HI(section));
5c862848 4145 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4146 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4147
4148 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4149 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4150 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4151
4152 /* TSTORM */
4153 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4154 t_def_status_block);
34f80b04 4155 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4156
4157 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4158 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4159 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4160 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4161 U64_HI(section));
5c862848 4162 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4163 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4164
4165 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4166 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4167 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4168
4169 /* XSTORM */
4170 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4171 x_def_status_block);
34f80b04 4172 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4173
4174 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4175 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4176 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4177 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4178 U64_HI(section));
5c862848 4179 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4180 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4181
4182 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4183 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4184 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4185
bb2a0f7a 4186 bp->stats_pending = 0;
66e855f3 4187 bp->set_mac_pending = 0;
bb2a0f7a 4188
34f80b04 4189 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4190}
4191
4192static void bnx2x_update_coalesce(struct bnx2x *bp)
4193{
34f80b04 4194 int port = BP_PORT(bp);
a2fbb9ea
ET
4195 int i;
4196
4197 for_each_queue(bp, i) {
34f80b04 4198 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4199
4200 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4201 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4202 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4203 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4204 bp->rx_ticks/12);
a2fbb9ea 4205 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4206 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4207 U_SB_ETH_RX_CQ_INDEX),
4208 bp->rx_ticks ? 0 : 1);
4209 REG_WR16(bp, BAR_USTRORM_INTMEM +
4210 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4211 U_SB_ETH_RX_BD_INDEX),
34f80b04 4212 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4213
4214 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4215 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4216 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4217 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4218 bp->tx_ticks/12);
a2fbb9ea 4219 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4220 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4221 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4222 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4223 }
4224}
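/* rx_ticks/tx_ticks are in microseconds; the division by 12
 * presumably converts them into the host coalescing block's
 * timeout units. A value of 0 disables the index outright
 * (HC_DISABLE set to 1) rather than requesting a zero timeout.
 */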
4225
7a9b2557
VZ
4226static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4227 struct bnx2x_fastpath *fp, int last)
4228{
4229 int i;
4230
4231 for (i = 0; i < last; i++) {
4232 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4233 struct sk_buff *skb = rx_buf->skb;
4234
4235 if (skb == NULL) {
4236 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4237 continue;
4238 }
4239
4240 if (fp->tpa_state[i] == BNX2X_TPA_START)
4241 pci_unmap_single(bp->pdev,
4242 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4243 bp->rx_buf_size,
7a9b2557
VZ
4244 PCI_DMA_FROMDEVICE);
4245
4246 dev_kfree_skb(skb);
4247 rx_buf->skb = NULL;
4248 }
4249}
4250
a2fbb9ea
ET
4251static void bnx2x_init_rx_rings(struct bnx2x *bp)
4252{
7a9b2557 4253 int func = BP_FUNC(bp);
32626230
EG
4254 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4255 ETH_MAX_AGGREGATION_QUEUES_E1H;
4256 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4257 int i, j;
a2fbb9ea 4258
437cf2f1
EG
4259 bp->rx_buf_size = bp->dev->mtu;
4260 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4261 BCM_RX_ETH_PAYLOAD_ALIGN;
a2fbb9ea 4262
7a9b2557
VZ
4263 if (bp->flags & TPA_ENABLE_FLAG) {
4264 DP(NETIF_MSG_IFUP,
437cf2f1
EG
4265 "rx_buf_size %d effective_mtu %d\n",
4266 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
7a9b2557
VZ
4267
4268 for_each_queue(bp, j) {
32626230 4269 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4270
32626230 4271 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4272 fp->tpa_pool[i].skb =
4273 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4274 if (!fp->tpa_pool[i].skb) {
4275 BNX2X_ERR("Failed to allocate TPA "
4276 "skb pool for queue[%d] - "
4277 "disabling TPA on this "
4278 "queue!\n", j);
4279 bnx2x_free_tpa_pool(bp, fp, i);
4280 fp->disable_tpa = 1;
4281 break;
4282 }
4283 pci_unmap_addr_set((struct sw_rx_bd *)
4284 &bp->fp->tpa_pool[i],
4285 mapping, 0);
4286 fp->tpa_state[i] = BNX2X_TPA_STOP;
4287 }
4288 }
4289 }
4290
a2fbb9ea
ET
4291 for_each_queue(bp, j) {
4292 struct bnx2x_fastpath *fp = &bp->fp[j];
4293
4294 fp->rx_bd_cons = 0;
4295 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4296 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4297
4298 /* "next page" elements initialization */
4299 /* SGE ring */
4300 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4301 struct eth_rx_sge *sge;
4302
4303 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4304 sge->addr_hi =
4305 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4306 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4307 sge->addr_lo =
4308 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4309 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4310 }
4311
4312 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4313
7a9b2557 4314 /* RX BD ring */
a2fbb9ea
ET
4315 for (i = 1; i <= NUM_RX_RINGS; i++) {
4316 struct eth_rx_bd *rx_bd;
4317
4318 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4319 rx_bd->addr_hi =
4320 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4321 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4322 rx_bd->addr_lo =
4323 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4324 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4325 }
4326
34f80b04 4327 /* CQ ring */
a2fbb9ea
ET
4328 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4329 struct eth_rx_cqe_next_page *nextpg;
4330
4331 nextpg = (struct eth_rx_cqe_next_page *)
4332 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4333 nextpg->addr_hi =
4334 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4335 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4336 nextpg->addr_lo =
4337 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4338 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4339 }
4340
7a9b2557
VZ
4341 /* Allocate SGEs and initialize the ring elements */
4342 for (i = 0, ring_prod = 0;
4343 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4344
7a9b2557
VZ
4345 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4346 BNX2X_ERR("was only able to allocate "
4347 "%d rx sges\n", i);
4348 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4349 /* Cleanup already allocated elements */
4350 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4351 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4352 fp->disable_tpa = 1;
4353 ring_prod = 0;
4354 break;
4355 }
4356 ring_prod = NEXT_SGE_IDX(ring_prod);
4357 }
4358 fp->rx_sge_prod = ring_prod;
4359
4360 /* Allocate BDs and initialize BD ring */
66e855f3 4361 fp->rx_comp_cons = 0;
7a9b2557 4362 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4363 for (i = 0; i < bp->rx_ring_size; i++) {
4364 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4365 BNX2X_ERR("was only able to allocate "
4366 "%d rx skbs\n", i);
66e855f3 4367 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4368 break;
4369 }
4370 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4371 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4372 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4373 }
4374
7a9b2557
VZ
4375 fp->rx_bd_prod = ring_prod;
4376 /* must not have more available CQEs than BDs */
4377 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4378 cqe_ring_prod);
a2fbb9ea
ET
4379 fp->rx_pkt = fp->rx_calls = 0;
4380
7a9b2557
VZ
4381 /* Warning!
4382  * This will generate an interrupt (to the TSTORM);
4383  * it must only be done after the chip is initialized.
4384  */
4385 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4386 fp->rx_sge_prod);
a2fbb9ea
ET
4387 if (j != 0)
4388 continue;
4389
4390 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4391 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4392 U64_LO(fp->rx_comp_mapping));
4393 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4394 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4395 U64_HI(fp->rx_comp_mapping));
4396 }
4397}
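/* Ring layout note: the last slots of every BD/SGE/CQE page
 * are reserved as "next page" pointers chaining the pages into
 * a circle, which is why the loops above write the pointer two
 * entries before the end of a BD/SGE page (the pointer spans
 * two 8-byte slots) and one entry before the end of a CQE page.
 */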
4398
4399static void bnx2x_init_tx_ring(struct bnx2x *bp)
4400{
4401 int i, j;
4402
4403 for_each_queue(bp, j) {
4404 struct bnx2x_fastpath *fp = &bp->fp[j];
4405
4406 for (i = 1; i <= NUM_TX_RINGS; i++) {
4407 struct eth_tx_bd *tx_bd =
4408 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4409
4410 tx_bd->addr_hi =
4411 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4412 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4413 tx_bd->addr_lo =
4414 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4415 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4416 }
4417
4418 fp->tx_pkt_prod = 0;
4419 fp->tx_pkt_cons = 0;
4420 fp->tx_bd_prod = 0;
4421 fp->tx_bd_cons = 0;
4422 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4423 fp->tx_pkt = 0;
4424 }
4425}
4426
4427static void bnx2x_init_sp_ring(struct bnx2x *bp)
4428{
34f80b04 4429 int func = BP_FUNC(bp);
a2fbb9ea
ET
4430
4431 spin_lock_init(&bp->spq_lock);
4432
4433 bp->spq_left = MAX_SPQ_PENDING;
4434 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4435 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4436 bp->spq_prod_bd = bp->spq;
4437 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4438
34f80b04 4439 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4440 U64_LO(bp->spq_mapping));
34f80b04
EG
4441 REG_WR(bp,
4442 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4443 U64_HI(bp->spq_mapping));
4444
34f80b04 4445 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4446 bp->spq_prod_idx);
4447}
4448
4449static void bnx2x_init_context(struct bnx2x *bp)
4450{
4451 int i;
4452
4453 for_each_queue(bp, i) {
4454 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4455 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4456 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
4457
4458 context->xstorm_st_context.tx_bd_page_base_hi =
4459 U64_HI(fp->tx_desc_mapping);
4460 context->xstorm_st_context.tx_bd_page_base_lo =
4461 U64_LO(fp->tx_desc_mapping);
4462 context->xstorm_st_context.db_data_addr_hi =
4463 U64_HI(fp->tx_prods_mapping);
4464 context->xstorm_st_context.db_data_addr_lo =
4465 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
4466 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
4467 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4468
4469 context->ustorm_st_context.common.sb_index_numbers =
4470 BNX2X_RX_SB_INDEX_NUM;
4471 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4472 context->ustorm_st_context.common.status_block_id = sb_id;
4473 context->ustorm_st_context.common.flags =
4474 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
437cf2f1
EG
4475 context->ustorm_st_context.common.mc_alignment_size =
4476 BCM_RX_ETH_PAYLOAD_ALIGN;
34f80b04 4477 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4478 bp->rx_buf_size;
34f80b04 4479 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4480 U64_HI(fp->rx_desc_mapping);
34f80b04 4481 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4482 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4483 if (!fp->disable_tpa) {
4484 context->ustorm_st_context.common.flags |=
4485 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4486 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4487 context->ustorm_st_context.common.sge_buff_size =
4488 (u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
4489 context->ustorm_st_context.common.sge_page_base_hi =
4490 U64_HI(fp->rx_sge_mapping);
4491 context->ustorm_st_context.common.sge_page_base_lo =
4492 U64_LO(fp->rx_sge_mapping);
4493 }
4494
a2fbb9ea 4495 context->cstorm_st_context.sb_index_number =
5c862848 4496 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4497 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4498
4499 context->xstorm_ag_context.cdu_reserved =
4500 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4501 CDU_REGION_NUMBER_XCM_AG,
4502 ETH_CONNECTION_TYPE);
4503 context->ustorm_ag_context.cdu_usage =
4504 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4505 CDU_REGION_NUMBER_UCM_AG,
4506 ETH_CONNECTION_TYPE);
4507 }
4508}
4509
4510static void bnx2x_init_ind_table(struct bnx2x *bp)
4511{
34f80b04 4512 int port = BP_PORT(bp);
a2fbb9ea
ET
4513 int i;
4514
4515 if (!is_multi(bp))
4516 return;
4517
34f80b04 4518 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 4519 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4520 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4521 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4522 i % bp->num_queues);
4523
4524 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4525}
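/* RSS indirection: table slot i simply maps to queue
 * (i % num_queues), i.e. a uniform round-robin spread of hash
 * buckets across the enabled fastpath queues.
 */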
4526
49d66772
ET
4527static void bnx2x_set_client_config(struct bnx2x *bp)
4528{
49d66772 4529 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4530 int port = BP_PORT(bp);
4531 int i;
49d66772 4532
34f80b04 4533 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
66e855f3 4534 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4535 tstorm_client.config_flags =
4536 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4537#ifdef BCM_VLAN
34f80b04 4538 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4539 tstorm_client.config_flags |=
4540 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4541 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4542 }
4543#endif
49d66772 4544
7a9b2557
VZ
4545 if (bp->flags & TPA_ENABLE_FLAG) {
4546 tstorm_client.max_sges_for_packet =
4547 BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
4548 tstorm_client.max_sges_for_packet =
4549 ((tstorm_client.max_sges_for_packet +
4550 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4551 PAGES_PER_SGE_SHIFT;
4552
4553 tstorm_client.config_flags |=
4554 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4555 }
4556
49d66772
ET
4557 for_each_queue(bp, i) {
4558 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4559 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4560 ((u32 *)&tstorm_client)[0]);
4561 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4562 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4563 ((u32 *)&tstorm_client)[1]);
4564 }
4565
34f80b04
EG
4566 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4567 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4568}
4569
a2fbb9ea
ET
4570static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4571{
a2fbb9ea 4572 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4573 int mode = bp->rx_mode;
4574 int mask = (1 << BP_L_ID(bp));
4575 int func = BP_FUNC(bp);
a2fbb9ea
ET
4576 int i;
4577
3196a88a 4578 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4579
4580 switch (mode) {
4581 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4582 tstorm_mac_filter.ucast_drop_all = mask;
4583 tstorm_mac_filter.mcast_drop_all = mask;
4584 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4585 break;
4586 case BNX2X_RX_MODE_NORMAL:
34f80b04 4587 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4588 break;
4589 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4590 tstorm_mac_filter.mcast_accept_all = mask;
4591 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4592 break;
4593 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4594 tstorm_mac_filter.ucast_accept_all = mask;
4595 tstorm_mac_filter.mcast_accept_all = mask;
4596 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4597 break;
4598 default:
34f80b04
EG
4599 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4600 break;
a2fbb9ea
ET
4601 }
4602
4603 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4604 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4605 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4606 ((u32 *)&tstorm_mac_filter)[i]);
4607
34f80b04 4608/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4609 ((u32 *)&tstorm_mac_filter)[i]); */
4610 }
a2fbb9ea 4611
49d66772
ET
4612 if (mode != BNX2X_RX_MODE_NONE)
4613 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4614}
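/* The accept/drop fields are per-function bit masks: mask has
 * only this function's BP_L_ID bit set, so each function on
 * the port programs its own Rx policy without disturbing the
 * filters of the other functions.
 */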
4615
471de716
EG
4616static void bnx2x_init_internal_common(struct bnx2x *bp)
4617{
4618 int i;
4619
3cdf1db7
YG
4620 if (bp->flags & TPA_ENABLE_FLAG) {
4621 struct tstorm_eth_tpa_exist tpa = {0};
4622
4623 tpa.tpa_exist = 1;
4624
4625 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4626 ((u32 *)&tpa)[0]);
4627 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4628 ((u32 *)&tpa)[1]);
4629 }
4630
471de716
EG
4631 /* Zero this manually as its initialization is
4632  * currently missing in the initTool */
4633 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4634 REG_WR(bp, BAR_USTRORM_INTMEM +
4635 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4636}
4637
4638static void bnx2x_init_internal_port(struct bnx2x *bp)
4639{
4640 int port = BP_PORT(bp);
4641
4642 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4643 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4644 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4645 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4646}
4647
4648static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4649{
a2fbb9ea
ET
4650 struct tstorm_eth_function_common_config tstorm_config = {0};
4651 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4652 int port = BP_PORT(bp);
4653 int func = BP_FUNC(bp);
4654 int i;
471de716 4655 u16 max_agg_size;
a2fbb9ea
ET
4656
4657 if (is_multi(bp)) {
4658 tstorm_config.config_flags = MULTI_FLAGS;
4659 tstorm_config.rss_result_mask = MULTI_MASK;
4660 }
4661
34f80b04
EG
4662 tstorm_config.leading_client_id = BP_L_ID(bp);
4663
a2fbb9ea 4664 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4665 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4666 (*(u32 *)&tstorm_config));
4667
c14423fe 4668 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4669 bnx2x_set_storm_rx_mode(bp);
4670
66e855f3
YG
4671 /* reset xstorm per client statistics */
4672 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4673 REG_WR(bp, BAR_XSTRORM_INTMEM +
4674 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4675 i*4, 0);
4676 }
4677 /* reset tstorm per client statistics */
4678 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4679 REG_WR(bp, BAR_TSTRORM_INTMEM +
4680 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4681 i*4, 0);
4682 }
4683
4684 /* Init statistics related context */
34f80b04 4685 stats_flags.collect_eth = 1;
a2fbb9ea 4686
66e855f3 4687 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4688 ((u32 *)&stats_flags)[0]);
66e855f3 4689 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4690 ((u32 *)&stats_flags)[1]);
4691
66e855f3 4692 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4693 ((u32 *)&stats_flags)[0]);
66e855f3 4694 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4695 ((u32 *)&stats_flags)[1]);
4696
66e855f3 4697 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4698 ((u32 *)&stats_flags)[0]);
66e855f3 4699 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4700 ((u32 *)&stats_flags)[1]);
4701
66e855f3
YG
4702 REG_WR(bp, BAR_XSTRORM_INTMEM +
4703 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4704 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4705 REG_WR(bp, BAR_XSTRORM_INTMEM +
4706 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4707 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4708
4709 REG_WR(bp, BAR_TSTRORM_INTMEM +
4710 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4711 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4712 REG_WR(bp, BAR_TSTRORM_INTMEM +
4713 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4714 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4715
4716 if (CHIP_IS_E1H(bp)) {
4717 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4718 IS_E1HMF(bp));
4719 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4720 IS_E1HMF(bp));
4721 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4722 IS_E1HMF(bp));
4723 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4724 IS_E1HMF(bp));
4725
7a9b2557
VZ
4726 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4727 bp->e1hov);
34f80b04
EG
4728 }
4729
471de716 4730 /* Init CQ ring mapping and aggregation size */
437cf2f1 4731 max_agg_size = min((u32)(bp->rx_buf_size +
471de716
EG
4732 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
4733 (u32)0xffff);
7a9b2557
VZ
4734 for_each_queue(bp, i) {
4735 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4736
4737 REG_WR(bp, BAR_USTRORM_INTMEM +
4738 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4739 U64_LO(fp->rx_comp_mapping));
4740 REG_WR(bp, BAR_USTRORM_INTMEM +
4741 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4742 U64_HI(fp->rx_comp_mapping));
4743
7a9b2557
VZ
4744 REG_WR16(bp, BAR_USTRORM_INTMEM +
4745 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4746 max_agg_size);
4747 }
a2fbb9ea
ET
4748}
4749
471de716
EG
4750static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4751{
4752 switch (load_code) {
4753 case FW_MSG_CODE_DRV_LOAD_COMMON:
4754 bnx2x_init_internal_common(bp);
4755 /* no break */
4756
4757 case FW_MSG_CODE_DRV_LOAD_PORT:
4758 bnx2x_init_internal_port(bp);
4759 /* no break */
4760
4761 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4762 bnx2x_init_internal_func(bp);
4763 break;
4764
4765 default:
4766 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4767 break;
4768 }
4769}
4770
4771static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4772{
4773 int i;
4774
4775 for_each_queue(bp, i) {
4776 struct bnx2x_fastpath *fp = &bp->fp[i];
4777
34f80b04 4778 fp->bp = bp;
a2fbb9ea 4779 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4780 fp->index = i;
34f80b04
EG
4781 fp->cl_id = BP_L_ID(bp) + i;
4782 fp->sb_id = fp->cl_id;
4783 DP(NETIF_MSG_IFUP,
4784 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4785 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
4786 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4787 FP_SB_ID(fp));
4788 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4789 }
4790
5c862848
EG
4791 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4792 DEF_SB_ID);
4793 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4794 bnx2x_update_coalesce(bp);
4795 bnx2x_init_rx_rings(bp);
4796 bnx2x_init_tx_ring(bp);
4797 bnx2x_init_sp_ring(bp);
4798 bnx2x_init_context(bp);
471de716 4799 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4800 bnx2x_init_ind_table(bp);
615f8fd9 4801 bnx2x_int_enable(bp);
a2fbb9ea
ET
4802}
4803
4804/* end of nic init */
4805
4806/*
4807 * gzip service functions
4808 */
4809
4810static int bnx2x_gunzip_init(struct bnx2x *bp)
4811{
4812 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4813 &bp->gunzip_mapping);
4814 if (bp->gunzip_buf == NULL)
4815 goto gunzip_nomem1;
4816
4817 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4818 if (bp->strm == NULL)
4819 goto gunzip_nomem2;
4820
4821 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4822 GFP_KERNEL);
4823 if (bp->strm->workspace == NULL)
4824 goto gunzip_nomem3;
4825
4826 return 0;
4827
4828gunzip_nomem3:
4829 kfree(bp->strm);
4830 bp->strm = NULL;
4831
4832gunzip_nomem2:
4833 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4834 bp->gunzip_mapping);
4835 bp->gunzip_buf = NULL;
4836
4837gunzip_nomem1:
4838 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4839 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4840 return -ENOMEM;
4841}
4842
4843static void bnx2x_gunzip_end(struct bnx2x *bp)
4844{
4845 kfree(bp->strm->workspace);
4846
4847 kfree(bp->strm);
4848 bp->strm = NULL;
4849
4850 if (bp->gunzip_buf) {
4851 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4852 bp->gunzip_mapping);
4853 bp->gunzip_buf = NULL;
4854 }
4855}
4856
4857static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4858{
4859 int n, rc;
4860
4861 /* check gzip header */
4862 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4863 return -EINVAL;
4864
4865 n = 10;
4866
34f80b04 4867#define FNAME 0x8
a2fbb9ea
ET
4868
4869 if (zbuf[3] & FNAME)
4870 while ((zbuf[n++] != 0) && (n < len));
4871
4872 bp->strm->next_in = zbuf + n;
4873 bp->strm->avail_in = len - n;
4874 bp->strm->next_out = bp->gunzip_buf;
4875 bp->strm->avail_out = FW_BUF_SIZE;
4876
4877 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4878 if (rc != Z_OK)
4879 return rc;
4880
4881 rc = zlib_inflate(bp->strm, Z_FINISH);
4882 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4883 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4884 bp->dev->name, bp->strm->msg);
4885
4886 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4887 if (bp->gunzip_outlen & 0x3)
4888 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4889 " gunzip_outlen (%d) not aligned\n",
4890 bp->dev->name, bp->gunzip_outlen);
4891 bp->gunzip_outlen >>= 2;
4892
4893 zlib_inflateEnd(bp->strm);
4894
4895 if (rc == Z_STREAM_END)
4896 return 0;
4897
4898 return rc;
4899}
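/* The firmware blobs are deflate payloads in a gzip wrapper:
 * the fixed 10-byte header is skipped, an optional
 * NUL-terminated file name follows when the FNAME flag is set,
 * and zlib_inflateInit2(-MAX_WBITS) then treats the remainder
 * as a raw (headerless) deflate stream.
 */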
4900
4901/* nic load/unload */
4902
4903/*
34f80b04 4904 * General service functions
a2fbb9ea
ET
4905 */
4906
4907/* send a NIG loopback debug packet */
4908static void bnx2x_lb_pckt(struct bnx2x *bp)
4909{
a2fbb9ea 4910 u32 wb_write[3];
a2fbb9ea
ET
4911
4912 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4913 wb_write[0] = 0x55555555;
4914 wb_write[1] = 0x55555555;
34f80b04 4915 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4916 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4917
4918 /* NON-IP protocol */
a2fbb9ea
ET
4919 wb_write[0] = 0x09000000;
4920 wb_write[1] = 0x55555555;
34f80b04 4921 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4922 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4923}
4924
4925 /* Some of the internal memories are not directly readable
4926  * from the driver, so to test them we send debug packets
4927  * through them.
4928  */
4929static int bnx2x_int_mem_test(struct bnx2x *bp)
4930{
4931 int factor;
4932 int count, i;
4933 u32 val = 0;
4934
ad8d3948 4935 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4936 factor = 120;
ad8d3948
EG
4937 else if (CHIP_REV_IS_EMUL(bp))
4938 factor = 200;
4939 else
a2fbb9ea 4940 factor = 1;
a2fbb9ea
ET
4941
4942 DP(NETIF_MSG_HW, "start part1\n");
4943
4944 /* Disable inputs of parser neighbor blocks */
4945 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4946 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4947 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4948 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4949
4950 /* Write 0 to parser credits for CFC search request */
4951 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4952
4953 /* send Ethernet packet */
4954 bnx2x_lb_pckt(bp);
4955
4956 /* TODO: do we need to reset the NIG statistics? */
4957 /* Wait until NIG register shows 1 packet of size 0x10 */
4958 count = 1000 * factor;
4959 while (count) {
34f80b04 4960
a2fbb9ea
ET
4961 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4962 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4963 if (val == 0x10)
4964 break;
4965
4966 msleep(10);
4967 count--;
4968 }
4969 if (val != 0x10) {
4970 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4971 return -1;
4972 }
4973
4974 /* Wait until PRS register shows 1 packet */
4975 count = 1000 * factor;
4976 while (count) {
4977 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4978 if (val == 1)
4979 break;
4980
4981 msleep(10);
4982 count--;
4983 }
4984 if (val != 0x1) {
4985 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4986 return -2;
4987 }
4988
4989 /* Reset and init BRB, PRS */
34f80b04 4990 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4991 msleep(50);
34f80b04 4992 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4993 msleep(50);
4994 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4995 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4996
4997 DP(NETIF_MSG_HW, "part2\n");
4998
4999 /* Disable inputs of parser neighbor blocks */
5000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5002 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5003 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5004
5005 /* Write 0 to parser credits for CFC search request */
5006 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5007
5008 /* send 10 Ethernet packets */
5009 for (i = 0; i < 10; i++)
5010 bnx2x_lb_pckt(bp);
5011
5012 /* Wait until NIG register shows 10 + 1
5013  * packets of size 11*0x10 = 0xb0 */
5014 count = 1000 * factor;
5015 while (count) {
34f80b04 5016
a2fbb9ea
ET
5017 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5018 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5019 if (val == 0xb0)
5020 break;
5021
5022 msleep(10);
5023 count--;
5024 }
5025 if (val != 0xb0) {
5026 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5027 return -3;
5028 }
5029
5030 /* Wait until PRS register shows 2 packets */
5031 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5032 if (val != 2)
5033 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5034
5035 /* Write 1 to parser credits for CFC search request */
5036 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5037
5038 /* Wait until PRS register shows 3 packets */
5039 msleep(10 * factor);
5040 /* Check that the PRS register indeed shows 3 packets */
5041 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5042 if (val != 3)
5043 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5044
5045 /* clear NIG EOP FIFO */
5046 for (i = 0; i < 11; i++)
5047 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5048 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5049 if (val != 1) {
5050 BNX2X_ERR("clear of NIG failed\n");
5051 return -4;
5052 }
5053
5054 /* Reset and init BRB, PRS, NIG */
5055 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5056 msleep(50);
5057 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5058 msleep(50);
5059 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5060 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5061#ifndef BCM_ISCSI
5062 /* set NIC mode */
5063 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5064#endif
5065
5066 /* Enable inputs of parser neighbor blocks */
5067 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5068 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5069 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5070 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5071
5072 DP(NETIF_MSG_HW, "done\n");
5073
5074 return 0; /* OK */
5075}
5076
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}


static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	if (CHIP_REV_IS_SLOW(bp)) {
		/* fix for emulation and FPGA for no pause */
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
	}

	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	if (CHIP_IS_E1H(bp))
		bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix pxp client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
				MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */
	/* Port BRB1 comes here */
	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		u32 wsum;
		struct cmng_struct_per_port m_cmng_port;
		int vn;

		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* Init RATE SHAPING and FAIRNESS contexts.
		   Initialize as if there is 10G link. */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, 10000, &m_cmng_port);
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
	case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a 1=valid bit is
   added as the 53rd bit; since this is a wide register(TM) we split
   it into two 32 bit writes */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0

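/*
 * Illustrative example (added; not in the original source): for a DMA
 * address of 0x0000001234567000, ONCHIP_ADDR1() yields 0x01234567 (the
 * low 32 bits of addr >> 12) and ONCHIP_ADDR2() yields 0x00100000 (just
 * the valid bit, since addr >> 44 is 0 here).  bnx2x_ilt_wr() below
 * writes such a pair into a single ILT line as two 32 bit halves.
 */
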
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	if (CHIP_IS_E1H(bp))
		REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	} else
		bp->func_stx = 0;

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
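
/*
 * Note (added for clarity, not in the original): the switch in
 * bnx2x_init_hw() above relies on deliberate fall-through -- a COMMON
 * load also runs the PORT and FUNCTION init stages, and a PORT load
 * also runs the FUNCTION stage, so each load_code initializes a
 * superset of the blocks initialized by the one below it.
 */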

/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
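
/*
 * Typical usage (sketch added for clarity, not in the original):
 * callers issue a request and dispatch on the FW_MSG_CODE_* reply, e.g.
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code)
 *		return -EBUSY;	// MCP did not answer within ~2 seconds
 *
 * A zero return therefore always means "no (valid) reply", never a
 * legitimate message code.
 */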

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {

		/* Status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* Status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);

		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fix up the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16 - 8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
			BNX2X_ERR("IRQ of fp #%d being freed while "
				  "state != closed\n", i);

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset;

	bp->msix_table[0].entry = 0;
	offset = 1;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");

	for_each_queue(bp, i) {
		int igu_vec = offset + i + BP_L_ID(bp);

		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     bp->num_queues + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0,
				 bp->dev->name, &bp->fp[i]);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc -%d\n",
				  i + offset, -rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
	}

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	int rc;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			if (bp->state == BNX2X_STATE_OPEN)
				netif_wake_queue(bp->dev);
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	if (netif_running(bp->dev)) {
		bnx2x_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
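
/*
 * Note (added for clarity, not in the original): bnx2x_netif_stop()
 * quiesces in a deliberate order -- interrupts are disabled and
 * synchronized first so no new NAPI work can be scheduled, then NAPI
 * polling is stopped, and only then is the tx queue frozen.
 * bnx2x_netif_start() brings things back in the reverse order, gated
 * by the intr_sem interrupt semaphore.
 */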

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length_6b = 2;
	config->hdr.offset = port ? 31 : 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.client_id = 0;
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
	config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.client_id = 0;
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	if (set && (bp->state != BNX2X_STATE_OPEN)) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length_6b = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].client_id = BP_L_ID(bp);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 500;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state)
			return 0;

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
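
/*
 * Note (added for clarity, not in the original): the mb() in the wait
 * loop above is one of the memory barriers this change introduces.
 * *state_p is updated from slowpath context by bnx2x_sp_event() when
 * the ramrod completion arrives; without the barrier the compiler or
 * CPU could keep re-reading a stale value of the state, and the wait
 * could time out even though the completion was already processed.
 */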

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(bp->fp[index].state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;
#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			return -EBUSY;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
			return -EBUSY; /* other port in diagnostic mode */

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* if we can't use MSI-X we only need one fp,
	 * so try to enable MSI-X with the requested number of fp's
	 * and fallback to inta with one fp
	 */
	if (use_inta) {
		bp->num_queues = 1;

	} else {
		if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
			/* user requested number */
			bp->num_queues = use_multi;

		else if (use_multi)
			bp->num_queues = min_t(u32, num_online_cpus(),
					       BP_MAX_QUEUES(bp));
		else
			bp->num_queues = 1;

		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			bp->num_queues = 1;
			if (use_multi)
				BNX2X_ERR("Multi requested but failed"
					  " to enable MSI-X\n");
		}
	}
	DP(NETIF_MSG_IFUP,
	   "set number of queues to %d\n", bp->num_queues);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error;
		}
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed, aborting\n");
			goto load_error;
		}
	}

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_int_disable;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_rings_free;
		}
	}

	bnx2x_stats_init(bp);

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	/* Enable Rx interrupt handling before sending the ramrod
	   as it's completed on Rx FP queue */
	bnx2x_napi_enable(bp);

	/* Enable interrupt handling */
	atomic_set(&bp->intr_sem, 0);

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_netif_stop;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_netif_stop;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_wake_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_start_queue(bp->dev);
		bnx2x_set_rx_mode(bp->dev);
		if (bp->flags & USING_MSIX_FLAG)
			printk(KERN_INFO PFX "%s: using MSI-X\n",
			       bp->dev->name);
		break;

	case LOAD_DIAG:
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_netif_stop:
	bnx2x_napi_disable(bp);
load_rings_free:
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_int_disable:
	bnx2x_int_disable_sync(bp, 1);
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error:
	bnx2x_free_mem(bp);
	bp->port.pmf = 0;

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	int rc;

	/* halt the connection */
	bp->fp[index].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(bp->fp[index].state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(bp->fp[index].state), 1);
	return rc;
}
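
/*
 * Note (added for clarity, not in the original): tearing down a queue is
 * a two-step handshake -- a HALT ramrod followed by a CFC delete ramrod,
 * each confirmed through bnx2x_wait_ramrod() polling the fastpath state
 * that the slowpath event handler updates.
 */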

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
#endif
			break;
		}
		cnt--;
		msleep(1);
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
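
/*
 * Note (added for clarity, not in the original): the reset scopes mirror
 * the load scopes in bnx2x_init_hw() -- an UNLOAD_COMMON resets function,
 * port and common blocks, UNLOAD_PORT resets function and port, and
 * UNLOAD_FUNCTION resets only the per-function state.
 */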

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);
	if (!netif_running(bp->dev))
		bnx2x_napi_disable(bp);
	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Wait until tx fast path tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_rmb();
		while (BNX2X_HAS_TX_WORK(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_rmb();
		}
	}
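	/*
	 * Note (added for clarity, not in the original): the smp_rmb()
	 * calls above pair with the producer-side updates performed in
	 * the tx completion path, making sure BNX2X_HAS_TX_WORK() sees
	 * a fresh view of the tx indices on every iteration instead of
	 * a value cached before the loop.
	 */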
	/* Give HW time to discard old tx messages */
	msleep(1);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length_6b = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7)
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
				    HC_REG_CONFIG_0), 0x1000);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			       NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			       MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		}
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

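	/* NVM_CFG4 encodes the flash size as a left-shift count applied to
	 * the base NVRAM_1MB_SIZE unit */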
	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

	BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
		       bp->common.hw_config, bp->common.board);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
	     KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
		       "  link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

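	/* the shmem keeps the MAC split across two words: mac_upper holds
	 * bytes 0-1, mac_lower holds bytes 2-5 */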
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

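		/* a non-default outer-VLAN (E1HOV) tag in the function's MF
		 * config marks this function as part of a multi-function
		 * setup */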
		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;
	bp->rx_offset = 0;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : bp->timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

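		/* in multi-function mode the per-function BW limit caps the
		 * reported speed; the MAX_BW field appears to be kept in
		 * units of 100 Mbps, hence the multiplication below */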
		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

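	/* single-byte write: read the containing dword, patch the one byte,
	 * then write the whole dword back as one FIRST|LAST command */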
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
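	/* a command sequence must be closed with LAST on the final dword of
	 * the buffer and at every NVRAM page boundary, and re-opened with
	 * FIRST at the start of the next page */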
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" },
	{ "MC errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

8583 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8584 mask = reg_tbl[i].mask;
8585
8586 save_val = REG_RD(bp, offset);
8587
8588 REG_WR(bp, offset, wr_val);
8589 val = REG_RD(bp, offset);
8590
8591 /* Restore the original register's value */
8592 REG_WR(bp, offset, save_val);
8593
8594 /* verify that value is as expected value */
8595 if ((val & mask) != (wr_val & mask))
8596 goto test_reg_exit;
8597 }
8598 }
8599
8600 rc = 0;
8601
8602test_reg_exit:
8603 return rc;
8604}
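
/*
 * Editor's note (illustrative, not part of the driver): reg_tbl encodes the
 * port 1 mirror of every tested register as a stride, so the address under
 * test is simply offset0 + port * offset1, as computed in the loop above.
 * A minimal sketch of that addressing, assuming only the table layout:
 */
static inline u32 __maybe_unused bnx2x_test_reg_addr(u32 offset0, u32 offset1,
						     int port)
{
	/* port is 0 or 1; offset1 is the distance between the two copies */
	return offset0 + port * offset1;
}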

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
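
/*
 * Editor's note: the barrier choreography above is the point of this patch.
 * A hedged restatement of the producer-update sequence as a helper (names
 * mirror the driver; this is a sketch, not a proposed refactor):
 */
static void __maybe_unused bnx2x_tx_prods_update_sketch(struct bnx2x *bp,
						struct bnx2x_fastpath *fp,
						u16 nbd)
{
	/* BD contents must be visible in memory before the producer that
	 * advertises them - the FW may fetch the BD right away */
	wmb();
	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	/* FW restriction: bds_prod must not be reordered after packets_prod */
	mb();
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);
	/* keep the doorbell MMIO write ordered before any later unlock */
	mmiowb();
}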

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
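
/*
 * Editor's note: each nvram_tbl block carries its own little-endian CRC32 in
 * the trailing word, so a CRC over the whole block (data plus stored CRC)
 * must come out to the constant residual checked above. A hedged one-line
 * self-check, assuming a caller that has already read 'size' bytes:
 */
static int __maybe_unused bnx2x_nvram_block_ok(u8 *data, int size)
{
	/* size includes the trailing 4-byte CRC word */
	return ether_crc_le(size, data) == CRC32_RESIDUAL;
}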

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length_6b = 0;
	config->hdr.offset = 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
				8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),
				4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards" },
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}
	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}
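
/*
 * Editor's note: 8-byte counters are stored as a _hi u32 followed by a _lo
 * u32, and the table records the offset of the high half; the 'offset + 1'
 * above reaches the low half. The same assembly, isolated (sketch only):
 */
static u64 __maybe_unused bnx2x_stat64_sketch(u32 *hw_stats, long offset)
{
	/* combine the two halves into one 64-bit value */
	return HILO_U64(*(hw_stats + offset), *(hw_stats + offset + 1));
}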

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;
	u16 rx_cons_sb;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (BNX2X_HAS_TX_WORK(fp))
		bnx2x_tx_int(fp, budget);

	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	if (BNX2X_HAS_RX_WORK(fp))
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* BNX2X_HAS_WORK() reads the status block */
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}
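
/*
 * Editor's note: the rmb() above is one of this patch's added barriers. The
 * chip DMAs new indices into the status block, so the re-read of the SB must
 * not be speculated ahead of the work already done; only after the barrier is
 * it safe to test for "no more work" and complete NAPI. Reduced to a helper
 * (illustrative sketch, not a proposed refactor):
 */
static int __maybe_unused bnx2x_poll_may_complete(struct bnx2x_fastpath *fp,
						  int work_done, int budget)
{
	/* pair with the status block DMA: read fresh indices before deciding
	 * that there is nothing left to do */
	rmb();
	return (work_done < budget) && !BNX2X_HAS_WORK(fp);
}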

/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
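
/*
 * Editor's note: 'fix' is the signed distance between the stack's checksum
 * start and the transport header. A positive fix means 'csum' covered 'fix'
 * extra leading bytes, which are subtracted back out; a negative fix adds the
 * missing leading bytes in; the result is byte-swapped for the HW. Hedged
 * usage sketch with the driver's own macros:
 */
static u16 __maybe_unused bnx2x_csum_fix_demo(struct sk_buff *skb)
{
	/* SKB_CS() is the current checksum, SKB_CS_OFF() the signed offset */
	return bnx2x_csum_fix(skb_transport_header(skb), SKB_CS(skb),
			      SKB_CS_OFF(skb));
}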

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* a non-LSO packet that is too fragmented
			   must always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
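
/*
 * Editor's note: the FW fetches at most MAX_FETCH_BD BDs per LSO transmission
 * window, so the code above slides a window of (MAX_FETCH_BD - 3) fragments
 * and requires every window to cover at least one MSS of payload. The core
 * predicate, pulled out for clarity (sketch only):
 */
static int __maybe_unused bnx2x_lso_wnd_too_small(u32 wnd_sum,
						  unsigned short lso_mss)
{
	/* a window shorter than one MSS forces skb_linearize() */
	return wnd_sum < lso_mss;
}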

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all PBD sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
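
/*
 * Editor's note: the smp_mb() before netif_stop_queue() above is the Tx-side
 * barrier this patch adds. bnx2x_tx_int() may run concurrently on another
 * CPU; publishing tx_bd_prod before stopping, then re-checking availability,
 * closes the race where the ring drains just as the queue is stopped and
 * nobody is left to wake it. The pattern in isolation (hedged sketch):
 */
static void __maybe_unused bnx2x_stop_queue_sketch(struct net_device *dev,
						   struct bnx2x_fastpath *fp)
{
	/* make the updated tx_bd_prod visible to a concurrent bnx2x_tx_int() */
	smp_mb();
	netif_stop_queue(dev);
	/* re-check: tx_int may have freed descriptors meanwhile */
	if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
		netif_wake_queue(dev);
}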

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length_6b;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						i--; /* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
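
/*
 * Editor's note: on E1H the multicast filter is a 256-bit hash spread over
 * MC_HASH_SIZE 32-bit registers; the top byte of the little-endian CRC32C of
 * the MAC selects the bit. The arithmetic above, as a helper (sketch only):
 */
static void __maybe_unused bnx2x_mc_hash_set(u32 *mc_filter, u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;	/* bit index 0..255 */

	/* high 3 bits pick the register, low 5 bits pick the bit */
	mc_filter[bit >> 5] |= (1 << (bit & 0x1f));
}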

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};


static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
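
/*
 * Editor's note: both helpers decode the same PCICFG_LINK_CONTROL word; the
 * probe path combines them when announcing the device. Hedged usage sketch:
 */
static void __maybe_unused bnx2x_print_pcie_sketch(struct bnx2x *bp)
{
	/* speed code: 1 = 2.5GHz, 2 = 5GHz (Gen2), per the comment above */
	printk(KERN_INFO PFX "PCI-E x%d %s\n", bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz");
}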

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc) {
		unregister_netdev(dev);
		goto init_one_exit;
	}

	netif_carrier_off(dev);

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
10346
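/*
 * The suspend handler above and the resume handler below pair up: both
 * take the rtnl lock to serialize against open()/close(), both handle the
 * interface-down case by touching only PCI config space, and when the
 * interface was running the device is detached from the stack before the
 * NIC is unloaded, so no new transmits reach a sleeping device.
 */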
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

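	/*
	 * E1 only: walk the slow-path multicast configuration table and
	 * mark every CAM entry invalid, so that a subsequent load starts
	 * from a clean table (hdr.length_6b holds the entry count).
	 */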
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

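/*
 * Unlike the regular bnx2x_nic_unload(), bnx2x_eeh_nic_unload() above
 * avoids talking to the chip or the management firmware: after a PCI
 * channel failure the device may be entirely inaccessible, so only
 * host-side state (timers, IRQs, buffers) is torn down.
 */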
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

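/*
 * The 0xA0000-0xC0000 window above is a sanity range for the shared
 * memory pointer: a value outside it means the management CPU (MCP)
 * never set shmem up, so the driver flags NO_MCP and skips firmware
 * handshakes; otherwise it re-reads the firmware sequence number needed
 * for subsequent MCP mailbox commands.
 */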
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

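/*
 * EEH flow: the PCI error-recovery core calls .error_detected first; by
 * returning PCI_ERS_RESULT_NEED_RESET above we request a slot reset,
 * after which the core invokes .slot_reset below, and on success it
 * completes recovery through .resume.
 */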
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

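/*
 * Returning PCI_ERS_RESULT_RECOVERED lets the error-recovery core go on
 * to the .resume callback below; PCI_ERS_RESULT_DISCONNECT instead marks
 * the device as permanently failed.
 */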
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

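/*
 * __devexit_p() evaluates to the remove handler when hot-unplug support
 * is configured in and to NULL otherwise, so bnx2x_remove_one() (marked
 * __devexit) can be discarded from kernels that will never call it.
 */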
static int __init bnx2x_init(void)
{
	int rc;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	rc = pci_register_driver(&bnx2x_pci_driver);
	if (rc)
		/* don't leak the workqueue if registration fails */
		destroy_workqueue(bnx2x_wq);

	return rc;
}

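/*
 * The workqueue is created before the driver registers so that probe and
 * the slow-path task can queue work immediately; bnx2x_cleanup() below
 * tears things down in the reverse order.  Elsewhere in the driver, work
 * is queued to it along these lines (sketch; assumes the slow-path
 * work_struct member used by this driver version):
 *
 *	queue_work(bnx2x_wq, &bp->sp_task);
 */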
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);