bnx2x: Supporting BCM8726 PHY
[deliverable/linux.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea
ET
51#include <linux/io.h>
52
53#include "bnx2x_reg.h"
54#include "bnx2x_fw_defs.h"
55#include "bnx2x_hsi.h"
c18487ee 56#include "bnx2x_link.h"
a2fbb9ea
ET
57#include "bnx2x.h"
58#include "bnx2x_init.h"
59
e8b5fc51
VZ
60#define DRV_MODULE_VERSION "1.45.26"
61#define DRV_MODULE_RELDATE "2009/01/26"
34f80b04 62#define BNX2X_BC_VER 0x040200
a2fbb9ea 63
34f80b04
EG
64/* Time in jiffies before concluding the transmitter is hung */
65#define TX_TIMEOUT (5*HZ)
a2fbb9ea 66
53a10565 67static char version[] __devinitdata =
34f80b04 68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
24e3fcef 71MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
73MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 75
555f6c78
EG
76static int multi_mode = 1;
77module_param(multi_mode, int, 0);
78
19680c48 79static int disable_tpa;
a2fbb9ea 80static int poll;
a2fbb9ea 81static int debug;
34f80b04 82static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 83
19680c48 84module_param(disable_tpa, int, 0);
8badd27a
EG
85
86static int int_mode;
87module_param(int_mode, int, 0);
88MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
89
a2fbb9ea 90module_param(poll, int, 0);
a2fbb9ea 91module_param(debug, int, 0);
19680c48 92MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
a2fbb9ea 93MODULE_PARM_DESC(poll, "use polling (for debug)");
c14423fe 94MODULE_PARM_DESC(debug, "default debug msglevel");
a2fbb9ea 95
1cf167f2 96static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
97
98enum bnx2x_board_type {
99 BCM57710 = 0,
34f80b04
EG
100 BCM57711 = 1,
101 BCM57711E = 2,
a2fbb9ea
ET
102};
103
34f80b04 104/* indexed by board_type, above */
53a10565 105static struct {
a2fbb9ea
ET
106 char *name;
107} board_info[] __devinitdata = {
34f80b04
EG
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
111};
112
34f80b04 113
a2fbb9ea
ET
114static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
121 { 0 }
122};
123
124MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
125
126/****************************************************************************
127* General service functions
128****************************************************************************/
129
130/* used only at init
131 * locking is done by mcp
132 */
133static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
134{
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
139}
140
a2fbb9ea
ET
141static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
142{
143 u32 val;
144
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
149
150 return val;
151}
a2fbb9ea
ET
152
153static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
158};
159
160/* copy command into DMAE command memory and set DMAE command go */
161static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162 int idx)
163{
164 u32 cmd_offset;
165 int i;
166
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170
ad8d3948
EG
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
173 }
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
175}
176
ad8d3948
EG
177void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
178 u32 len32)
a2fbb9ea 179{
ad8d3948 180 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
182 int cnt = 200;
183
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
186
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
190 return;
191 }
192
193 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
194
195 memset(dmae, 0, sizeof(struct dmae_command));
196
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
200#ifdef __BIG_ENDIAN
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
202#else
203 DMAE_CMD_ENDIANITY_DW_SWAP |
204#endif
34f80b04
EG
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
211 dmae->len = len32;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 214 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 215
ad8d3948 216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
226
227 *wb_comp = 0;
228
34f80b04 229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
230
231 udelay(5);
ad8d3948
EG
232
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
235
ad8d3948 236 if (!cnt) {
a2fbb9ea
ET
237 BNX2X_ERR("dmae timeout!\n");
238 break;
239 }
ad8d3948 240 cnt--;
12469401
YG
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
243 msleep(100);
244 else
245 udelay(5);
a2fbb9ea 246 }
ad8d3948
EG
247
248 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
249}
250
c18487ee 251void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 252{
ad8d3948 253 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
255 int cnt = 200;
256
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
259 int i;
260
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
265 return;
266 }
267
268 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
269
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
272
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
276#ifdef __BIG_ENDIAN
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
278#else
279 DMAE_CMD_ENDIANITY_DW_SWAP |
280#endif
34f80b04
EG
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
287 dmae->len = len32;
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 290 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 291
ad8d3948 292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
299
300 *wb_comp = 0;
301
34f80b04 302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
303
304 udelay(5);
ad8d3948
EG
305
306 while (*wb_comp != DMAE_COMP_VAL) {
307
ad8d3948 308 if (!cnt) {
a2fbb9ea
ET
309 BNX2X_ERR("dmae timeout!\n");
310 break;
311 }
ad8d3948 312 cnt--;
12469401
YG
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
315 msleep(100);
316 else
317 udelay(5);
a2fbb9ea 318 }
ad8d3948 319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
322
323 mutex_unlock(&bp->dmae_mutex);
324}
325
326/* used only for slowpath so not inlined */
327static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
328{
329 u32 wb_write[2];
330
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 334}
a2fbb9ea 335
ad8d3948
EG
336#ifdef USE_WB_RD
337static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
338{
339 u32 wb_data[2];
340
341 REG_RD_DMAE(bp, reg, wb_data, 2);
342
343 return HILO_U64(wb_data[0], wb_data[1]);
344}
345#endif
346
a2fbb9ea
ET
347static int bnx2x_mc_assert(struct bnx2x *bp)
348{
a2fbb9ea 349 char last_idx;
34f80b04
EG
350 int i, rc = 0;
351 u32 row0, row1, row2, row3;
352
353 /* XSTORM */
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
356 if (last_idx)
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
358
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
361
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
370
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
375 rc++;
376 } else {
377 break;
378 }
379 }
380
381 /* TSTORM */
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
384 if (last_idx)
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
386
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
389
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
398
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
403 rc++;
404 } else {
405 break;
406 }
407 }
408
409 /* CSTORM */
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
412 if (last_idx)
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
414
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
417
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
426
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
431 rc++;
432 } else {
433 break;
434 }
435 }
436
437 /* USTORM */
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
440 if (last_idx)
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
442
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
445
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
454
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
459 rc++;
460 } else {
461 break;
a2fbb9ea
ET
462 }
463 }
34f80b04 464
a2fbb9ea
ET
465 return rc;
466}
c14423fe 467
a2fbb9ea
ET
468static void bnx2x_fw_dump(struct bnx2x *bp)
469{
470 u32 mark, offset;
471 u32 data[9];
472 int word;
473
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772
ET
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
a2fbb9ea
ET
477
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
481 offset + 4*word));
482 data[8] = 0x0;
49d66772 483 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
484 }
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
488 offset + 4*word));
489 data[8] = 0x0;
49d66772 490 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
491 }
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
493}
494
495static void bnx2x_panic_dump(struct bnx2x *bp)
496{
497 int i;
498 u16 j, start, end;
499
66e855f3
YG
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
502
a2fbb9ea
ET
503 BNX2X_ERR("begin crash dump -----------------\n");
504
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
508
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
34f80b04 510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
a2fbb9ea 511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
34f80b04 512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
66e855f3
YG
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
524 fp->fp_u_idx,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
a2fbb9ea
ET
527
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
532
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
535 }
536
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
541
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
544 }
545
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
551
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
34f80b04 553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
554 }
555
3196a88a
EG
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
7a9b2557
VZ
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
561
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
564 }
565
a2fbb9ea
ET
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
570
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
573 }
574 }
575
49d66772
ET
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 578 " spq_prod_idx(%u)\n",
49d66772 579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
581
34f80b04 582 bnx2x_fw_dump(bp);
a2fbb9ea
ET
583 bnx2x_mc_assert(bp);
584 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
585}
586
615f8fd9 587static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 588{
34f80b04 589 int port = BP_PORT(bp);
a2fbb9ea
ET
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 593 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
594
595 if (msix) {
8badd27a
EG
596 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
597 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
600 } else if (msi) {
601 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
602 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
603 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
605 } else {
606 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 607 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
608 HC_CONFIG_0_REG_INT_LINE_EN_0 |
609 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 610
8badd27a
EG
611 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
612 val, port, addr);
615f8fd9
ET
613
614 REG_WR(bp, addr, val);
615
a2fbb9ea
ET
616 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
617 }
618
8badd27a
EG
619 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
620 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
621
622 REG_WR(bp, addr, val);
34f80b04
EG
623
624 if (CHIP_IS_E1H(bp)) {
625 /* init leading/trailing edge */
626 if (IS_E1HMF(bp)) {
8badd27a 627 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 628 if (bp->port.pmf)
4acac6a5
EG
629 /* enable nig and gpio3 attention */
630 val |= 0x1100;
34f80b04
EG
631 } else
632 val = 0xffff;
633
634 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
635 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
636 }
a2fbb9ea
ET
637}
638
615f8fd9 639static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 640{
34f80b04 641 int port = BP_PORT(bp);
a2fbb9ea
ET
642 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
643 u32 val = REG_RD(bp, addr);
644
645 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_INT_LINE_EN_0 |
648 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
649
650 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
651 val, port, addr);
652
8badd27a
EG
653 /* flush all outstanding writes */
654 mmiowb();
655
a2fbb9ea
ET
656 REG_WR(bp, addr, val);
657 if (REG_RD(bp, addr) != val)
658 BNX2X_ERR("BUG! proper val not read from IGU!\n");
659}
660
f8ef6e44 661static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 662{
a2fbb9ea 663 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 664 int i, offset;
a2fbb9ea 665
34f80b04 666 /* disable interrupt handling */
a2fbb9ea 667 atomic_inc(&bp->intr_sem);
f8ef6e44
YG
668 if (disable_hw)
669 /* prevent the HW from sending interrupts */
670 bnx2x_int_disable(bp);
a2fbb9ea
ET
671
672 /* make sure all ISRs are done */
673 if (msix) {
8badd27a
EG
674 synchronize_irq(bp->msix_table[0].vector);
675 offset = 1;
a2fbb9ea 676 for_each_queue(bp, i)
8badd27a 677 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
678 } else
679 synchronize_irq(bp->pdev->irq);
680
681 /* make sure sp_task is not running */
1cf167f2
EG
682 cancel_delayed_work(&bp->sp_task);
683 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
684}
685
34f80b04 686/* fast path */
a2fbb9ea
ET
687
688/*
34f80b04 689 * General service functions
a2fbb9ea
ET
690 */
691
34f80b04 692static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
693 u8 storm, u16 index, u8 op, u8 update)
694{
5c862848
EG
695 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
696 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
697 struct igu_ack_register igu_ack;
698
699 igu_ack.status_block_index = index;
700 igu_ack.sb_id_and_flags =
34f80b04 701 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
702 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
703 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
704 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
705
5c862848
EG
706 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
707 (*(u32 *)&igu_ack), hc_addr);
708 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
a2fbb9ea
ET
709}
710
711static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
712{
713 struct host_status_block *fpsb = fp->status_blk;
714 u16 rc = 0;
715
716 barrier(); /* status block is written to by the chip */
717 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
718 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
719 rc |= 1;
720 }
721 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
722 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
723 rc |= 2;
724 }
725 return rc;
726}
727
a2fbb9ea
ET
728static u16 bnx2x_ack_int(struct bnx2x *bp)
729{
5c862848
EG
730 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
731 COMMAND_REG_SIMD_MASK);
732 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 733
5c862848
EG
734 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
735 result, hc_addr);
a2fbb9ea 736
a2fbb9ea
ET
737 return result;
738}
739
740
741/*
742 * fast path service functions
743 */
744
237907c1
EG
745static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
746{
747 u16 tx_cons_sb;
748
749 /* Tell compiler that status block fields can change */
750 barrier();
751 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
e8b5fc51
VZ
752 return (fp->tx_pkt_cons != tx_cons_sb);
753}
754
755static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
756{
757 /* Tell compiler that consumer and producer can change */
758 barrier();
759 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
760
237907c1
EG
761}
762
a2fbb9ea
ET
763/* free skb in the packet ring at pos idx
764 * return idx of last bd freed
765 */
766static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
767 u16 idx)
768{
769 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
770 struct eth_tx_bd *tx_bd;
771 struct sk_buff *skb = tx_buf->skb;
34f80b04 772 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
773 int nbd;
774
775 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
776 idx, tx_buf, skb);
777
778 /* unmap first bd */
779 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
780 tx_bd = &fp->tx_desc_ring[bd_idx];
781 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
782 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
783
784 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 785 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
786#ifdef BNX2X_STOP_ON_ERROR
787 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 788 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
789 bnx2x_panic();
790 }
791#endif
792
793 /* Skip a parse bd and the TSO split header bd
794 since they have no mapping */
795 if (nbd)
796 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
797
798 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
799 ETH_TX_BD_FLAGS_TCP_CSUM |
800 ETH_TX_BD_FLAGS_SW_LSO)) {
801 if (--nbd)
802 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
803 tx_bd = &fp->tx_desc_ring[bd_idx];
804 /* is this a TSO split header bd? */
805 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
806 if (--nbd)
807 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
808 }
809 }
810
811 /* now free frags */
812 while (nbd > 0) {
813
814 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
815 tx_bd = &fp->tx_desc_ring[bd_idx];
816 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
817 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
818 if (--nbd)
819 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
820 }
821
822 /* release skb */
53e5e96e 823 WARN_ON(!skb);
a2fbb9ea
ET
824 dev_kfree_skb(skb);
825 tx_buf->first_bd = 0;
826 tx_buf->skb = NULL;
827
34f80b04 828 return new_cons;
a2fbb9ea
ET
829}
830
34f80b04 831static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 832{
34f80b04
EG
833 s16 used;
834 u16 prod;
835 u16 cons;
a2fbb9ea 836
34f80b04 837 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
838 prod = fp->tx_bd_prod;
839 cons = fp->tx_bd_cons;
840
34f80b04
EG
841 /* NUM_TX_RINGS = number of "next-page" entries
842 It will be used as a threshold */
843 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 844
34f80b04 845#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
846 WARN_ON(used < 0);
847 WARN_ON(used > fp->bp->tx_ring_size);
848 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 849#endif
a2fbb9ea 850
34f80b04 851 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
852}
853
854static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
855{
856 struct bnx2x *bp = fp->bp;
555f6c78 857 struct netdev_queue *txq;
a2fbb9ea
ET
858 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
859 int done = 0;
860
861#ifdef BNX2X_STOP_ON_ERROR
862 if (unlikely(bp->panic))
863 return;
864#endif
865
555f6c78 866 txq = netdev_get_tx_queue(bp->dev, fp->index);
a2fbb9ea
ET
867 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
868 sw_cons = fp->tx_pkt_cons;
869
870 while (sw_cons != hw_cons) {
871 u16 pkt_cons;
872
873 pkt_cons = TX_BD(sw_cons);
874
875 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
876
34f80b04 877 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
878 hw_cons, sw_cons, pkt_cons);
879
34f80b04 880/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
881 rmb();
882 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
883 }
884*/
885 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
886 sw_cons++;
887 done++;
888
889 if (done == work)
890 break;
891 }
892
893 fp->tx_pkt_cons = sw_cons;
894 fp->tx_bd_cons = bd_cons;
895
555f6c78
EG
896 /* Need to make the tx_bd_cons update visible to start_xmit()
897 * before checking for netif_tx_queue_stopped(). Without the
a2fbb9ea
ET
898 * memory barrier, there is a small possibility that start_xmit()
899 * will miss it and cause the queue to be stopped forever.
900 */
901 smp_mb();
902
903 /* TBD need a thresh? */
555f6c78 904 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 905
555f6c78 906 __netif_tx_lock(txq, smp_processor_id());
a2fbb9ea 907
555f6c78 908 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 909 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 910 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 911 netif_tx_wake_queue(txq);
a2fbb9ea 912
555f6c78 913 __netif_tx_unlock(txq);
a2fbb9ea
ET
914 }
915}
916
3196a88a 917
a2fbb9ea
ET
918static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
919 union eth_rx_cqe *rr_cqe)
920{
921 struct bnx2x *bp = fp->bp;
922 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
923 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
924
34f80b04 925 DP(BNX2X_MSG_SP,
a2fbb9ea 926 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
927 FP_IDX(fp), cid, command, bp->state,
928 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
929
930 bp->spq_left++;
931
34f80b04 932 if (FP_IDX(fp)) {
a2fbb9ea
ET
933 switch (command | fp->state) {
934 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
935 BNX2X_FP_STATE_OPENING):
936 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
937 cid);
938 fp->state = BNX2X_FP_STATE_OPEN;
939 break;
940
941 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
942 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
943 cid);
944 fp->state = BNX2X_FP_STATE_HALTED;
945 break;
946
947 default:
34f80b04
EG
948 BNX2X_ERR("unexpected MC reply (%d) "
949 "fp->state is %x\n", command, fp->state);
950 break;
a2fbb9ea 951 }
34f80b04 952 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
953 return;
954 }
c14423fe 955
a2fbb9ea
ET
956 switch (command | bp->state) {
957 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
958 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
959 bp->state = BNX2X_STATE_OPEN;
960 break;
961
962 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
963 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
964 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
965 fp->state = BNX2X_FP_STATE_HALTED;
966 break;
967
a2fbb9ea 968 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 969 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 970 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
971 break;
972
3196a88a 973
a2fbb9ea 974 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 975 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 976 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 977 bp->set_mac_pending = 0;
a2fbb9ea
ET
978 break;
979
49d66772 980 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 981 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
982 break;
983
a2fbb9ea 984 default:
34f80b04 985 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 986 command, bp->state);
34f80b04 987 break;
a2fbb9ea 988 }
34f80b04 989 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
990}
991
7a9b2557
VZ
992static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
994{
995 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
996 struct page *page = sw_buf->page;
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
998
999 /* Skip "next page" elements */
1000 if (!page)
1001 return;
1002
1003 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1004 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1005 __free_pages(page, PAGES_PER_SGE_SHIFT);
1006
1007 sw_buf->page = NULL;
1008 sge->addr_hi = 0;
1009 sge->addr_lo = 0;
1010}
1011
1012static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1013 struct bnx2x_fastpath *fp, int last)
1014{
1015 int i;
1016
1017 for (i = 0; i < last; i++)
1018 bnx2x_free_rx_sge(bp, fp, i);
1019}
1020
1021static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1022 struct bnx2x_fastpath *fp, u16 index)
1023{
1024 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1025 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1026 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1027 dma_addr_t mapping;
1028
1029 if (unlikely(page == NULL))
1030 return -ENOMEM;
1031
4f40f2cb 1032 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1033 PCI_DMA_FROMDEVICE);
8d8bb39b 1034 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1035 __free_pages(page, PAGES_PER_SGE_SHIFT);
1036 return -ENOMEM;
1037 }
1038
1039 sw_buf->page = page;
1040 pci_unmap_addr_set(sw_buf, mapping, mapping);
1041
1042 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1043 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1044
1045 return 0;
1046}
1047
a2fbb9ea
ET
1048static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1049 struct bnx2x_fastpath *fp, u16 index)
1050{
1051 struct sk_buff *skb;
1052 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1053 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1054 dma_addr_t mapping;
1055
1056 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1057 if (unlikely(skb == NULL))
1058 return -ENOMEM;
1059
437cf2f1 1060 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1061 PCI_DMA_FROMDEVICE);
8d8bb39b 1062 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1063 dev_kfree_skb(skb);
1064 return -ENOMEM;
1065 }
1066
1067 rx_buf->skb = skb;
1068 pci_unmap_addr_set(rx_buf, mapping, mapping);
1069
1070 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1071 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1072
1073 return 0;
1074}
1075
1076/* note that we are not allocating a new skb,
1077 * we are just moving one from cons to prod
1078 * we are not creating a new mapping,
1079 * so there is no need to check for dma_mapping_error().
1080 */
1081static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1082 struct sk_buff *skb, u16 cons, u16 prod)
1083{
1084 struct bnx2x *bp = fp->bp;
1085 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1086 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1087 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1088 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1089
1090 pci_dma_sync_single_for_device(bp->pdev,
1091 pci_unmap_addr(cons_rx_buf, mapping),
87942b46 1092 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
1093
1094 prod_rx_buf->skb = cons_rx_buf->skb;
1095 pci_unmap_addr_set(prod_rx_buf, mapping,
1096 pci_unmap_addr(cons_rx_buf, mapping));
1097 *prod_bd = *cons_bd;
1098}
1099
7a9b2557
VZ
1100static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1101 u16 idx)
1102{
1103 u16 last_max = fp->last_max_sge;
1104
1105 if (SUB_S16(idx, last_max) > 0)
1106 fp->last_max_sge = idx;
1107}
1108
1109static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1110{
1111 int i, j;
1112
1113 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1114 int idx = RX_SGE_CNT * i - 1;
1115
1116 for (j = 0; j < 2; j++) {
1117 SGE_MASK_CLEAR_BIT(fp, idx);
1118 idx--;
1119 }
1120 }
1121}
1122
1123static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1124 struct eth_fast_path_rx_cqe *fp_cqe)
1125{
1126 struct bnx2x *bp = fp->bp;
4f40f2cb 1127 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1128 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1129 SGE_PAGE_SHIFT;
7a9b2557
VZ
1130 u16 last_max, last_elem, first_elem;
1131 u16 delta = 0;
1132 u16 i;
1133
1134 if (!sge_len)
1135 return;
1136
1137 /* First mark all used pages */
1138 for (i = 0; i < sge_len; i++)
1139 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1140
1141 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1142 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1143
1144 /* Here we assume that the last SGE index is the biggest */
1145 prefetch((void *)(fp->sge_mask));
1146 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1147
1148 last_max = RX_SGE(fp->last_max_sge);
1149 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1150 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1151
1152 /* If ring is not full */
1153 if (last_elem + 1 != first_elem)
1154 last_elem++;
1155
1156 /* Now update the prod */
1157 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1158 if (likely(fp->sge_mask[i]))
1159 break;
1160
1161 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1162 delta += RX_SGE_MASK_ELEM_SZ;
1163 }
1164
1165 if (delta > 0) {
1166 fp->rx_sge_prod += delta;
1167 /* clear page-end entries */
1168 bnx2x_clear_sge_mask_next_elems(fp);
1169 }
1170
1171 DP(NETIF_MSG_RX_STATUS,
1172 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1173 fp->last_max_sge, fp->rx_sge_prod);
1174}
1175
1176static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1177{
1178 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1179 memset(fp->sge_mask, 0xff,
1180 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1181
33471629
EG
1182 /* Clear the two last indices in the page to 1:
1183 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1184 hence will never be indicated and should be removed from
1185 the calculations. */
1186 bnx2x_clear_sge_mask_next_elems(fp);
1187}
1188
1189static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1190 struct sk_buff *skb, u16 cons, u16 prod)
1191{
1192 struct bnx2x *bp = fp->bp;
1193 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1194 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1195 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1196 dma_addr_t mapping;
1197
1198 /* move empty skb from pool to prod and map it */
1199 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1200 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1201 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1202 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1203
1204 /* move partial skb from cons to pool (don't unmap yet) */
1205 fp->tpa_pool[queue] = *cons_rx_buf;
1206
1207 /* mark bin state as start - print error if current state != stop */
1208 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1209 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1210
1211 fp->tpa_state[queue] = BNX2X_TPA_START;
1212
1213 /* point prod_bd to new skb */
1214 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1215 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1216
1217#ifdef BNX2X_STOP_ON_ERROR
1218 fp->tpa_queue_used |= (1 << queue);
1219#ifdef __powerpc64__
1220 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1221#else
1222 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1223#endif
1224 fp->tpa_queue_used);
1225#endif
1226}
1227
1228static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1229 struct sk_buff *skb,
1230 struct eth_fast_path_rx_cqe *fp_cqe,
1231 u16 cqe_idx)
1232{
1233 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1234 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1235 u32 i, frag_len, frag_size, pages;
1236 int err;
1237 int j;
1238
1239 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1240 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1241
1242 /* This is needed in order to enable forwarding support */
1243 if (frag_size)
4f40f2cb 1244 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1245 max(frag_size, (u32)len_on_bd));
1246
1247#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1248 if (pages >
1249 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1250 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1251 pages, cqe_idx);
1252 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1253 fp_cqe->pkt_len, len_on_bd);
1254 bnx2x_panic();
1255 return -EINVAL;
1256 }
1257#endif
1258
1259 /* Run through the SGL and compose the fragmented skb */
1260 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1261 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1262
1263 /* FW gives the indices of the SGE as if the ring is an array
1264 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1265 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1266 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1267 old_rx_pg = *rx_pg;
1268
1269 /* If we fail to allocate a substitute page, we simply stop
1270 where we are and drop the whole packet */
1271 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1272 if (unlikely(err)) {
de832a55 1273 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1274 return err;
1275 }
1276
1277 /* Unmap the page as we r going to pass it to the stack */
1278 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1279 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1280
1281 /* Add one frag and update the appropriate fields in the skb */
1282 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1283
1284 skb->data_len += frag_len;
1285 skb->truesize += frag_len;
1286 skb->len += frag_len;
1287
1288 frag_size -= frag_len;
1289 }
1290
1291 return 0;
1292}
1293
1294static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1295 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1296 u16 cqe_idx)
1297{
1298 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1299 struct sk_buff *skb = rx_buf->skb;
1300 /* alloc new skb */
1301 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1302
1303 /* Unmap skb in the pool anyway, as we are going to change
1304 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1305 fails. */
1306 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1307 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1308
7a9b2557 1309 if (likely(new_skb)) {
66e855f3
YG
1310 /* fix ip xsum and give it to the stack */
1311 /* (no need to map the new skb) */
0c6671b0
EG
1312#ifdef BCM_VLAN
1313 int is_vlan_cqe =
1314 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1315 PARSING_FLAGS_VLAN);
1316 int is_not_hwaccel_vlan_cqe =
1317 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1318#endif
7a9b2557
VZ
1319
1320 prefetch(skb);
1321 prefetch(((char *)(skb)) + 128);
1322
7a9b2557
VZ
1323#ifdef BNX2X_STOP_ON_ERROR
1324 if (pad + len > bp->rx_buf_size) {
1325 BNX2X_ERR("skb_put is about to fail... "
1326 "pad %d len %d rx_buf_size %d\n",
1327 pad, len, bp->rx_buf_size);
1328 bnx2x_panic();
1329 return;
1330 }
1331#endif
1332
1333 skb_reserve(skb, pad);
1334 skb_put(skb, len);
1335
1336 skb->protocol = eth_type_trans(skb, bp->dev);
1337 skb->ip_summed = CHECKSUM_UNNECESSARY;
1338
1339 {
1340 struct iphdr *iph;
1341
1342 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1343#ifdef BCM_VLAN
1344 /* If there is no Rx VLAN offloading -
1345 take VLAN tag into an account */
1346 if (unlikely(is_not_hwaccel_vlan_cqe))
1347 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1348#endif
7a9b2557
VZ
1349 iph->check = 0;
1350 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1351 }
1352
1353 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1354 &cqe->fast_path_cqe, cqe_idx)) {
1355#ifdef BCM_VLAN
0c6671b0
EG
1356 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1357 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1358 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1359 le16_to_cpu(cqe->fast_path_cqe.
1360 vlan_tag));
1361 else
1362#endif
1363 netif_receive_skb(skb);
1364 } else {
1365 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1366 " - dropping packet!\n");
1367 dev_kfree_skb(skb);
1368 }
1369
7a9b2557
VZ
1370
1371 /* put new skb in bin */
1372 fp->tpa_pool[queue].skb = new_skb;
1373
1374 } else {
66e855f3 1375 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1376 DP(NETIF_MSG_RX_STATUS,
1377 "Failed to allocate new skb - dropping packet!\n");
de832a55 1378 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1379 }
1380
1381 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1382}
1383
1384static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1385 struct bnx2x_fastpath *fp,
1386 u16 bd_prod, u16 rx_comp_prod,
1387 u16 rx_sge_prod)
1388{
8d9c5f34 1389 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1390 int i;
1391
1392 /* Update producers */
1393 rx_prods.bd_prod = bd_prod;
1394 rx_prods.cqe_prod = rx_comp_prod;
1395 rx_prods.sge_prod = rx_sge_prod;
1396
58f4c4cf
EG
1397 /*
1398 * Make sure that the BD and SGE data is updated before updating the
1399 * producers since FW might read the BD/SGE right after the producer
1400 * is updated.
1401 * This is only applicable for weak-ordered memory model archs such
1402 * as IA-64. The following barrier is also mandatory since FW will
1403 * assumes BDs must have buffers.
1404 */
1405 wmb();
1406
8d9c5f34
EG
1407 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1408 REG_WR(bp, BAR_USTRORM_INTMEM +
1409 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
7a9b2557
VZ
1410 ((u32 *)&rx_prods)[i]);
1411
58f4c4cf
EG
1412 mmiowb(); /* keep prod updates ordered */
1413
7a9b2557 1414 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1415 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1416 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1417}
1418
a2fbb9ea
ET
1419static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1420{
1421 struct bnx2x *bp = fp->bp;
34f80b04 1422 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1423 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1424 int rx_pkt = 0;
1425
1426#ifdef BNX2X_STOP_ON_ERROR
1427 if (unlikely(bp->panic))
1428 return 0;
1429#endif
1430
34f80b04
EG
1431 /* CQ "next element" is of the size of the regular element,
1432 that's why it's ok here */
a2fbb9ea
ET
1433 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1434 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1435 hw_comp_cons++;
1436
1437 bd_cons = fp->rx_bd_cons;
1438 bd_prod = fp->rx_bd_prod;
34f80b04 1439 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1440 sw_comp_cons = fp->rx_comp_cons;
1441 sw_comp_prod = fp->rx_comp_prod;
1442
1443 /* Memory barrier necessary as speculative reads of the rx
1444 * buffer can be ahead of the index in the status block
1445 */
1446 rmb();
1447
1448 DP(NETIF_MSG_RX_STATUS,
1449 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
34f80b04 1450 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1451
1452 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1453 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1454 struct sk_buff *skb;
1455 union eth_rx_cqe *cqe;
34f80b04
EG
1456 u8 cqe_fp_flags;
1457 u16 len, pad;
a2fbb9ea
ET
1458
1459 comp_ring_cons = RCQ_BD(sw_comp_cons);
1460 bd_prod = RX_BD(bd_prod);
1461 bd_cons = RX_BD(bd_cons);
1462
1463 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1464 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1465
a2fbb9ea 1466 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1467 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1468 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1469 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1470 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1471 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1472
1473 /* is this a slowpath msg? */
34f80b04 1474 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1475 bnx2x_sp_event(fp, cqe);
1476 goto next_cqe;
1477
1478 /* this is an rx packet */
1479 } else {
1480 rx_buf = &fp->rx_buf_ring[bd_cons];
1481 skb = rx_buf->skb;
a2fbb9ea
ET
1482 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1483 pad = cqe->fast_path_cqe.placement_offset;
1484
7a9b2557
VZ
1485 /* If CQE is marked both TPA_START and TPA_END
1486 it is a non-TPA CQE */
1487 if ((!fp->disable_tpa) &&
1488 (TPA_TYPE(cqe_fp_flags) !=
1489 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1490 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1491
1492 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1493 DP(NETIF_MSG_RX_STATUS,
1494 "calling tpa_start on queue %d\n",
1495 queue);
1496
1497 bnx2x_tpa_start(fp, queue, skb,
1498 bd_cons, bd_prod);
1499 goto next_rx;
1500 }
1501
1502 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1503 DP(NETIF_MSG_RX_STATUS,
1504 "calling tpa_stop on queue %d\n",
1505 queue);
1506
1507 if (!BNX2X_RX_SUM_FIX(cqe))
1508 BNX2X_ERR("STOP on none TCP "
1509 "data\n");
1510
1511 /* This is a size of the linear data
1512 on this skb */
1513 len = le16_to_cpu(cqe->fast_path_cqe.
1514 len_on_bd);
1515 bnx2x_tpa_stop(bp, fp, queue, pad,
1516 len, cqe, comp_ring_cons);
1517#ifdef BNX2X_STOP_ON_ERROR
1518 if (bp->panic)
1519 return -EINVAL;
1520#endif
1521
1522 bnx2x_update_sge_prod(fp,
1523 &cqe->fast_path_cqe);
1524 goto next_cqe;
1525 }
1526 }
1527
a2fbb9ea
ET
1528 pci_dma_sync_single_for_device(bp->pdev,
1529 pci_unmap_addr(rx_buf, mapping),
1530 pad + RX_COPY_THRESH,
1531 PCI_DMA_FROMDEVICE);
1532 prefetch(skb);
1533 prefetch(((char *)(skb)) + 128);
1534
1535 /* is this an error packet? */
34f80b04 1536 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1537 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1538 "ERROR flags %x rx packet %u\n",
1539 cqe_fp_flags, sw_comp_cons);
de832a55 1540 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1541 goto reuse_rx;
1542 }
1543
1544 /* Since we don't have a jumbo ring
1545 * copy small packets if mtu > 1500
1546 */
1547 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1548 (len <= RX_COPY_THRESH)) {
1549 struct sk_buff *new_skb;
1550
1551 new_skb = netdev_alloc_skb(bp->dev,
1552 len + pad);
1553 if (new_skb == NULL) {
1554 DP(NETIF_MSG_RX_ERR,
34f80b04 1555 "ERROR packet dropped "
a2fbb9ea 1556 "because of alloc failure\n");
de832a55 1557 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1558 goto reuse_rx;
1559 }
1560
1561 /* aligned copy */
1562 skb_copy_from_linear_data_offset(skb, pad,
1563 new_skb->data + pad, len);
1564 skb_reserve(new_skb, pad);
1565 skb_put(new_skb, len);
1566
1567 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1568
1569 skb = new_skb;
1570
1571 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1572 pci_unmap_single(bp->pdev,
1573 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1574 bp->rx_buf_size,
a2fbb9ea
ET
1575 PCI_DMA_FROMDEVICE);
1576 skb_reserve(skb, pad);
1577 skb_put(skb, len);
1578
1579 } else {
1580 DP(NETIF_MSG_RX_ERR,
34f80b04 1581 "ERROR packet dropped because "
a2fbb9ea 1582 "of alloc failure\n");
de832a55 1583 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1584reuse_rx:
1585 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1586 goto next_rx;
1587 }
1588
1589 skb->protocol = eth_type_trans(skb, bp->dev);
1590
1591 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1592 if (bp->rx_csum) {
1adcd8be
EG
1593 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1594 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1595 else
de832a55 1596 fp->eth_q_stats.hw_csum_err++;
66e855f3 1597 }
a2fbb9ea
ET
1598 }
1599
748e5439 1600 skb_record_rx_queue(skb, fp->index);
a2fbb9ea 1601#ifdef BCM_VLAN
0c6671b0 1602 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1603 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1604 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1605 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1606 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1607 else
1608#endif
34f80b04 1609 netif_receive_skb(skb);
a2fbb9ea 1610
a2fbb9ea
ET
1611
1612next_rx:
1613 rx_buf->skb = NULL;
1614
1615 bd_cons = NEXT_RX_IDX(bd_cons);
1616 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1617 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1618 rx_pkt++;
a2fbb9ea
ET
1619next_cqe:
1620 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1621 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1622
34f80b04 1623 if (rx_pkt == budget)
a2fbb9ea
ET
1624 break;
1625 } /* while */
1626
1627 fp->rx_bd_cons = bd_cons;
34f80b04 1628 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1629 fp->rx_comp_cons = sw_comp_cons;
1630 fp->rx_comp_prod = sw_comp_prod;
1631
7a9b2557
VZ
1632 /* Update producers */
1633 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1634 fp->rx_sge_prod);
a2fbb9ea
ET
1635
1636 fp->rx_pkt += rx_pkt;
1637 fp->rx_calls++;
1638
1639 return rx_pkt;
1640}
1641
1642static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1643{
1644 struct bnx2x_fastpath *fp = fp_cookie;
1645 struct bnx2x *bp = fp->bp;
34f80b04 1646 int index = FP_IDX(fp);
a2fbb9ea 1647
da5a662a
VZ
1648 /* Return here if interrupt is disabled */
1649 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1650 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1651 return IRQ_HANDLED;
1652 }
1653
34f80b04
EG
1654 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1655 index, FP_SB_ID(fp));
1656 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1657
1658#ifdef BNX2X_STOP_ON_ERROR
1659 if (unlikely(bp->panic))
1660 return IRQ_HANDLED;
1661#endif
1662
1663 prefetch(fp->rx_cons_sb);
1664 prefetch(fp->tx_cons_sb);
1665 prefetch(&fp->status_blk->c_status_block.status_block_index);
1666 prefetch(&fp->status_blk->u_status_block.status_block_index);
1667
288379f0 1668 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1669
a2fbb9ea
ET
1670 return IRQ_HANDLED;
1671}
1672
1673static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1674{
555f6c78 1675 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1676 u16 status = bnx2x_ack_int(bp);
34f80b04 1677 u16 mask;
a2fbb9ea 1678
34f80b04 1679 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1680 if (unlikely(status == 0)) {
1681 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1682 return IRQ_NONE;
1683 }
34f80b04 1684 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1685
34f80b04 1686 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1687 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1688 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1689 return IRQ_HANDLED;
1690 }
1691
3196a88a
EG
1692#ifdef BNX2X_STOP_ON_ERROR
1693 if (unlikely(bp->panic))
1694 return IRQ_HANDLED;
1695#endif
1696
34f80b04
EG
1697 mask = 0x2 << bp->fp[0].sb_id;
1698 if (status & mask) {
a2fbb9ea
ET
1699 struct bnx2x_fastpath *fp = &bp->fp[0];
1700
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(fp->tx_cons_sb);
1703 prefetch(&fp->status_blk->c_status_block.status_block_index);
1704 prefetch(&fp->status_blk->u_status_block.status_block_index);
1705
288379f0 1706 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1707
34f80b04 1708 status &= ~mask;
a2fbb9ea
ET
1709 }
1710
a2fbb9ea 1711
34f80b04 1712 if (unlikely(status & 0x1)) {
1cf167f2 1713 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1714
1715 status &= ~0x1;
1716 if (!status)
1717 return IRQ_HANDLED;
1718 }
1719
34f80b04
EG
1720 if (status)
1721 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1722 status);
a2fbb9ea 1723
c18487ee 1724 return IRQ_HANDLED;
a2fbb9ea
ET
1725}
1726
c18487ee 1727/* end of fast path */
a2fbb9ea 1728
bb2a0f7a 1729static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1730
c18487ee
YR
1731/* Link */
1732
1733/*
1734 * General service functions
1735 */
a2fbb9ea 1736
4a37fb66 1737static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1738{
1739 u32 lock_status;
1740 u32 resource_bit = (1 << resource);
4a37fb66
YG
1741 int func = BP_FUNC(bp);
1742 u32 hw_lock_control_reg;
c18487ee 1743 int cnt;
a2fbb9ea 1744
c18487ee
YR
1745 /* Validating that the resource is within range */
1746 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1747 DP(NETIF_MSG_HW,
1748 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1749 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1750 return -EINVAL;
1751 }
a2fbb9ea 1752
4a37fb66
YG
1753 if (func <= 5) {
1754 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1755 } else {
1756 hw_lock_control_reg =
1757 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1758 }
1759
c18487ee 1760 /* Validating that the resource is not already taken */
4a37fb66 1761 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1762 if (lock_status & resource_bit) {
1763 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1764 lock_status, resource_bit);
1765 return -EEXIST;
1766 }
a2fbb9ea 1767
46230476
EG
1768 /* Try for 5 second every 5ms */
1769 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1770 /* Try to acquire the lock */
4a37fb66
YG
1771 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1772 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1773 if (lock_status & resource_bit)
1774 return 0;
a2fbb9ea 1775
c18487ee 1776 msleep(5);
a2fbb9ea 1777 }
c18487ee
YR
1778 DP(NETIF_MSG_HW, "Timeout\n");
1779 return -EAGAIN;
1780}
a2fbb9ea 1781
4a37fb66 1782static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1783{
1784 u32 lock_status;
1785 u32 resource_bit = (1 << resource);
4a37fb66
YG
1786 int func = BP_FUNC(bp);
1787 u32 hw_lock_control_reg;
a2fbb9ea 1788
c18487ee
YR
1789 /* Validating that the resource is within range */
1790 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791 DP(NETIF_MSG_HW,
1792 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794 return -EINVAL;
1795 }
1796
4a37fb66
YG
1797 if (func <= 5) {
1798 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799 } else {
1800 hw_lock_control_reg =
1801 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802 }
1803
c18487ee 1804 /* Validating that the resource is currently taken */
4a37fb66 1805 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1806 if (!(lock_status & resource_bit)) {
1807 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1808 lock_status, resource_bit);
1809 return -EFAULT;
a2fbb9ea
ET
1810 }
1811
4a37fb66 1812 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1813 return 0;
1814}
1815
1816/* HW Lock for shared dual port PHYs */
4a37fb66 1817static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1818{
1819 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1820
34f80b04 1821 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1822
c18487ee
YR
1823 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1824 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1825 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1826}
a2fbb9ea 1827
4a37fb66 1828static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1829{
1830 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1831
c18487ee
YR
1832 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1833 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1834 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1835
34f80b04 1836 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1837}
a2fbb9ea 1838
4acac6a5
EG
1839int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1840{
1841 /* The GPIO should be swapped if swap register is set and active */
1842 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1843 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1844 int gpio_shift = gpio_num +
1845 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1846 u32 gpio_mask = (1 << gpio_shift);
1847 u32 gpio_reg;
1848 int value;
1849
1850 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1852 return -EINVAL;
1853 }
1854
1855 /* read GPIO value */
1856 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1857
1858 /* get the requested pin value */
1859 if ((gpio_reg & gpio_mask) == gpio_mask)
1860 value = 1;
1861 else
1862 value = 0;
1863
1864 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1865
1866 return value;
1867}
1868
17de50b7 1869int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1870{
1871 /* The GPIO should be swapped if swap register is set and active */
1872 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1873 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1874 int gpio_shift = gpio_num +
1875 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1876 u32 gpio_mask = (1 << gpio_shift);
1877 u32 gpio_reg;
a2fbb9ea 1878
c18487ee
YR
1879 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1880 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1881 return -EINVAL;
1882 }
a2fbb9ea 1883
4a37fb66 1884 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1885 /* read GPIO and mask except the float bits */
1886 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1887
c18487ee
YR
1888 switch (mode) {
1889 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1890 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1891 gpio_num, gpio_shift);
1892 /* clear FLOAT and set CLR */
1893 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1894 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1895 break;
a2fbb9ea 1896
c18487ee
YR
1897 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1898 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1899 gpio_num, gpio_shift);
1900 /* clear FLOAT and set SET */
1901 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1902 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1903 break;
a2fbb9ea 1904
17de50b7 1905 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1906 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1907 gpio_num, gpio_shift);
1908 /* set FLOAT */
1909 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1910 break;
a2fbb9ea 1911
c18487ee
YR
1912 default:
1913 break;
a2fbb9ea
ET
1914 }
1915
c18487ee 1916 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1917 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1918
c18487ee 1919 return 0;
a2fbb9ea
ET
1920}
1921
4acac6a5
EG
1922int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1923{
1924 /* The GPIO should be swapped if swap register is set and active */
1925 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1926 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1927 int gpio_shift = gpio_num +
1928 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1929 u32 gpio_mask = (1 << gpio_shift);
1930 u32 gpio_reg;
1931
1932 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1933 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1934 return -EINVAL;
1935 }
1936
1937 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1938 /* read GPIO int */
1939 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1940
1941 switch (mode) {
1942 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1943 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1944 "output low\n", gpio_num, gpio_shift);
1945 /* clear SET and set CLR */
1946 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1947 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1948 break;
1949
1950 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1951 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1952 "output high\n", gpio_num, gpio_shift);
1953 /* clear CLR and set SET */
1954 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1955 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1956 break;
1957
1958 default:
1959 break;
1960 }
1961
1962 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1963 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1964
1965 return 0;
1966}
1967
c18487ee 1968static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1969{
c18487ee
YR
1970 u32 spio_mask = (1 << spio_num);
1971 u32 spio_reg;
a2fbb9ea 1972
c18487ee
YR
1973 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1974 (spio_num > MISC_REGISTERS_SPIO_7)) {
1975 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1976 return -EINVAL;
a2fbb9ea
ET
1977 }
1978
4a37fb66 1979 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1980 /* read SPIO and mask except the float bits */
1981 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1982
c18487ee 1983 switch (mode) {
6378c025 1984 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1985 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1986 /* clear FLOAT and set CLR */
1987 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1988 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1989 break;
a2fbb9ea 1990
6378c025 1991 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1992 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1993 /* clear FLOAT and set SET */
1994 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1995 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1996 break;
a2fbb9ea 1997
c18487ee
YR
1998 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1999 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2000 /* set FLOAT */
2001 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2002 break;
a2fbb9ea 2003
c18487ee
YR
2004 default:
2005 break;
a2fbb9ea
ET
2006 }
2007
c18487ee 2008 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2009 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2010
a2fbb9ea
ET
2011 return 0;
2012}
2013
c18487ee 2014static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2015{
ad33ea3a
EG
2016 switch (bp->link_vars.ieee_fc &
2017 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2018 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2019 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2020 ADVERTISED_Pause);
2021 break;
2022 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2023 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2024 ADVERTISED_Pause);
2025 break;
2026 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2027 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
2028 break;
2029 default:
34f80b04 2030 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2031 ADVERTISED_Pause);
2032 break;
2033 }
2034}
f1410647 2035
c18487ee
YR
2036static void bnx2x_link_report(struct bnx2x *bp)
2037{
2038 if (bp->link_vars.link_up) {
2039 if (bp->state == BNX2X_STATE_OPEN)
2040 netif_carrier_on(bp->dev);
2041 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2042
c18487ee 2043 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2044
c18487ee
YR
2045 if (bp->link_vars.duplex == DUPLEX_FULL)
2046 printk("full duplex");
2047 else
2048 printk("half duplex");
f1410647 2049
c0700f90
DM
2050 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2051 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2052 printk(", receive ");
c0700f90 2053 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2054 printk("& transmit ");
2055 } else {
2056 printk(", transmit ");
2057 }
2058 printk("flow control ON");
2059 }
2060 printk("\n");
f1410647 2061
c18487ee
YR
2062 } else { /* link_down */
2063 netif_carrier_off(bp->dev);
2064 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2065 }
c18487ee
YR
2066}
2067
2068static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2069{
19680c48
EG
2070 if (!BP_NOMCP(bp)) {
2071 u8 rc;
a2fbb9ea 2072
19680c48 2073 /* Initialize link parameters structure variables */
8c99e7b0
YR
2074 /* It is recommended to turn off RX FC for jumbo frames
2075 for better performance */
2076 if (IS_E1HMF(bp))
c0700f90 2077 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2078 else if (bp->dev->mtu > 5000)
c0700f90 2079 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2080 else
c0700f90 2081 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2082
4a37fb66 2083 bnx2x_acquire_phy_lock(bp);
19680c48 2084 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2085 bnx2x_release_phy_lock(bp);
a2fbb9ea 2086
3c96c68b
EG
2087 bnx2x_calc_fc_adv(bp);
2088
19680c48
EG
2089 if (bp->link_vars.link_up)
2090 bnx2x_link_report(bp);
a2fbb9ea 2091
34f80b04 2092
19680c48
EG
2093 return rc;
2094 }
2095 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2096 return -EINVAL;
a2fbb9ea
ET
2097}
2098
c18487ee 2099static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2100{
19680c48 2101 if (!BP_NOMCP(bp)) {
4a37fb66 2102 bnx2x_acquire_phy_lock(bp);
19680c48 2103 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2104 bnx2x_release_phy_lock(bp);
a2fbb9ea 2105
19680c48
EG
2106 bnx2x_calc_fc_adv(bp);
2107 } else
2108 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 2109}
a2fbb9ea 2110
c18487ee
YR
2111static void bnx2x__link_reset(struct bnx2x *bp)
2112{
19680c48 2113 if (!BP_NOMCP(bp)) {
4a37fb66 2114 bnx2x_acquire_phy_lock(bp);
589abe3a 2115 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2116 bnx2x_release_phy_lock(bp);
19680c48
EG
2117 } else
2118 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 2119}
a2fbb9ea 2120
c18487ee
YR
2121static u8 bnx2x_link_test(struct bnx2x *bp)
2122{
2123 u8 rc;
a2fbb9ea 2124
4a37fb66 2125 bnx2x_acquire_phy_lock(bp);
c18487ee 2126 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2127 bnx2x_release_phy_lock(bp);
a2fbb9ea 2128
c18487ee
YR
2129 return rc;
2130}
a2fbb9ea 2131
8a1c38d1 2132static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2133{
8a1c38d1
EG
2134 u32 r_param = bp->link_vars.line_speed / 8;
2135 u32 fair_periodic_timeout_usec;
2136 u32 t_fair;
34f80b04 2137
8a1c38d1
EG
2138 memset(&(bp->cmng.rs_vars), 0,
2139 sizeof(struct rate_shaping_vars_per_port));
2140 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2141
8a1c38d1
EG
2142 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2143 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2144
8a1c38d1
EG
2145 /* this is the threshold below which no timer arming will occur
2146 1.25 coefficient is for the threshold to be a little bigger
2147 than the real time, to compensate for timer in-accuracy */
2148 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2149 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2150
8a1c38d1
EG
2151 /* resolution of fairness timer */
2152 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2153 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2154 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2155
8a1c38d1
EG
2156 /* this is the threshold below which we won't arm the timer anymore */
2157 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2158
8a1c38d1
EG
2159 /* we multiply by 1e3/8 to get bytes/msec.
2160 We don't want the credits to pass a credit
2161 of the t_fair*FAIR_MEM (algorithm resolution) */
2162 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2163 /* since each tick is 4 usec */
2164 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2165}
2166
8a1c38d1 2167static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2168{
2169 struct rate_shaping_vars_per_vn m_rs_vn;
2170 struct fairness_vars_per_vn m_fair_vn;
2171 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2172 u16 vn_min_rate, vn_max_rate;
2173 int i;
2174
2175 /* If function is hidden - set min and max to zeroes */
2176 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2177 vn_min_rate = 0;
2178 vn_max_rate = 0;
2179
2180 } else {
2181 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2182 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2183 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2184 if current min rate is zero - set it to 1.
33471629 2185 This is a requirement of the algorithm. */
8a1c38d1 2186 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2187 vn_min_rate = DEF_MIN_RATE;
2188 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2189 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2190 }
2191
8a1c38d1
EG
2192 DP(NETIF_MSG_IFUP,
2193 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2194 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2195
2196 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2197 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2198
2199 /* global vn counter - maximal Mbps for this vn */
2200 m_rs_vn.vn_counter.rate = vn_max_rate;
2201
2202 /* quota - number of bytes transmitted in this period */
2203 m_rs_vn.vn_counter.quota =
2204 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2205
8a1c38d1 2206 if (bp->vn_weight_sum) {
34f80b04
EG
2207 /* credit for each period of the fairness algorithm:
2208 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2209 vn_weight_sum should not be larger than 10000, thus
2210 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2211 than zero */
34f80b04 2212 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2213 max((u32)(vn_min_rate * (T_FAIR_COEF /
2214 (8 * bp->vn_weight_sum))),
2215 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2216 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2217 m_fair_vn.vn_credit_delta);
2218 }
2219
34f80b04
EG
2220 /* Store it to internal memory */
2221 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2222 REG_WR(bp, BAR_XSTRORM_INTMEM +
2223 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2224 ((u32 *)(&m_rs_vn))[i]);
2225
2226 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2227 REG_WR(bp, BAR_XSTRORM_INTMEM +
2228 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2229 ((u32 *)(&m_fair_vn))[i]);
2230}
2231
8a1c38d1 2232
c18487ee
YR
2233/* This function is called upon link interrupt */
2234static void bnx2x_link_attn(struct bnx2x *bp)
2235{
bb2a0f7a
YG
2236 /* Make sure that we are synced with the current statistics */
2237 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2238
c18487ee 2239 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2240
bb2a0f7a
YG
2241 if (bp->link_vars.link_up) {
2242
1c06328c
EG
2243 /* dropless flow control */
2244 if (CHIP_IS_E1H(bp)) {
2245 int port = BP_PORT(bp);
2246 u32 pause_enabled = 0;
2247
2248 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2249 pause_enabled = 1;
2250
2251 REG_WR(bp, BAR_USTRORM_INTMEM +
2252 USTORM_PAUSE_ENABLED_OFFSET(port),
2253 pause_enabled);
2254 }
2255
bb2a0f7a
YG
2256 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2257 struct host_port_stats *pstats;
2258
2259 pstats = bnx2x_sp(bp, port_stats);
2260 /* reset old bmac stats */
2261 memset(&(pstats->mac_stx[0]), 0,
2262 sizeof(struct mac_stx));
2263 }
2264 if ((bp->state == BNX2X_STATE_OPEN) ||
2265 (bp->state == BNX2X_STATE_DISABLED))
2266 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2267 }
2268
c18487ee
YR
2269 /* indicate link status */
2270 bnx2x_link_report(bp);
34f80b04
EG
2271
2272 if (IS_E1HMF(bp)) {
8a1c38d1 2273 int port = BP_PORT(bp);
34f80b04 2274 int func;
8a1c38d1 2275 int vn;
34f80b04
EG
2276
2277 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2278 if (vn == BP_E1HVN(bp))
2279 continue;
2280
8a1c38d1 2281 func = ((vn << 1) | port);
34f80b04
EG
2282
2283 /* Set the attention towards other drivers
2284 on the same port */
2285 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2286 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2287 }
34f80b04 2288
8a1c38d1
EG
2289 if (bp->link_vars.link_up) {
2290 int i;
2291
2292 /* Init rate shaping and fairness contexts */
2293 bnx2x_init_port_minmax(bp);
34f80b04 2294
34f80b04 2295 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2296 bnx2x_init_vn_minmax(bp, 2*vn + port);
2297
2298 /* Store it to internal memory */
2299 for (i = 0;
2300 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2301 REG_WR(bp, BAR_XSTRORM_INTMEM +
2302 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2303 ((u32 *)(&bp->cmng))[i]);
2304 }
34f80b04 2305 }
c18487ee 2306}
a2fbb9ea 2307
c18487ee
YR
2308static void bnx2x__link_status_update(struct bnx2x *bp)
2309{
2310 if (bp->state != BNX2X_STATE_OPEN)
2311 return;
a2fbb9ea 2312
c18487ee 2313 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2314
bb2a0f7a
YG
2315 if (bp->link_vars.link_up)
2316 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2317 else
2318 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2319
c18487ee
YR
2320 /* indicate link status */
2321 bnx2x_link_report(bp);
a2fbb9ea 2322}
a2fbb9ea 2323
34f80b04
EG
2324static void bnx2x_pmf_update(struct bnx2x *bp)
2325{
2326 int port = BP_PORT(bp);
2327 u32 val;
2328
2329 bp->port.pmf = 1;
2330 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2331
2332 /* enable nig attention */
2333 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2334 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2335 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2336
2337 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2338}
2339
c18487ee 2340/* end of Link */
a2fbb9ea
ET
2341
2342/* slow path */
2343
2344/*
2345 * General service functions
2346 */
2347
2348/* the slow path queue is odd since completions arrive on the fastpath ring */
2349static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2350 u32 data_hi, u32 data_lo, int common)
2351{
34f80b04 2352 int func = BP_FUNC(bp);
a2fbb9ea 2353
34f80b04
EG
2354 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2355 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2356 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2357 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2358 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2359
2360#ifdef BNX2X_STOP_ON_ERROR
2361 if (unlikely(bp->panic))
2362 return -EIO;
2363#endif
2364
34f80b04 2365 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2366
2367 if (!bp->spq_left) {
2368 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2369 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2370 bnx2x_panic();
2371 return -EBUSY;
2372 }
f1410647 2373
a2fbb9ea
ET
2374 /* CID needs port number to be encoded int it */
2375 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2376 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2377 HW_CID(bp, cid)));
2378 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2379 if (common)
2380 bp->spq_prod_bd->hdr.type |=
2381 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2382
2383 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2384 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2385
2386 bp->spq_left--;
2387
2388 if (bp->spq_prod_bd == bp->spq_last_bd) {
2389 bp->spq_prod_bd = bp->spq;
2390 bp->spq_prod_idx = 0;
2391 DP(NETIF_MSG_TIMER, "end of spq\n");
2392
2393 } else {
2394 bp->spq_prod_bd++;
2395 bp->spq_prod_idx++;
2396 }
2397
34f80b04 2398 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2399 bp->spq_prod_idx);
2400
34f80b04 2401 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2402 return 0;
2403}
2404
2405/* acquire split MCP access lock register */
4a37fb66 2406static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2407{
a2fbb9ea 2408 u32 i, j, val;
34f80b04 2409 int rc = 0;
a2fbb9ea
ET
2410
2411 might_sleep();
2412 i = 100;
2413 for (j = 0; j < i*10; j++) {
2414 val = (1UL << 31);
2415 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2416 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2417 if (val & (1L << 31))
2418 break;
2419
2420 msleep(5);
2421 }
a2fbb9ea 2422 if (!(val & (1L << 31))) {
19680c48 2423 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2424 rc = -EBUSY;
2425 }
2426
2427 return rc;
2428}
2429
4a37fb66
YG
2430/* release split MCP access lock register */
2431static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2432{
2433 u32 val = 0;
2434
2435 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2436}
2437
2438static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2439{
2440 struct host_def_status_block *def_sb = bp->def_status_blk;
2441 u16 rc = 0;
2442
2443 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2444 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2445 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2446 rc |= 1;
2447 }
2448 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2449 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2450 rc |= 2;
2451 }
2452 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2453 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2454 rc |= 4;
2455 }
2456 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2457 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2458 rc |= 8;
2459 }
2460 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2461 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2462 rc |= 16;
2463 }
2464 return rc;
2465}
2466
2467/*
2468 * slow path service functions
2469 */
2470
2471static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2472{
34f80b04 2473 int port = BP_PORT(bp);
5c862848
EG
2474 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2475 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2476 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2477 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2478 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2479 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2480 u32 aeu_mask;
87942b46 2481 u32 nig_mask = 0;
a2fbb9ea 2482
a2fbb9ea
ET
2483 if (bp->attn_state & asserted)
2484 BNX2X_ERR("IGU ERROR\n");
2485
3fcaf2e5
EG
2486 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2487 aeu_mask = REG_RD(bp, aeu_addr);
2488
a2fbb9ea 2489 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2490 aeu_mask, asserted);
2491 aeu_mask &= ~(asserted & 0xff);
2492 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2493
3fcaf2e5
EG
2494 REG_WR(bp, aeu_addr, aeu_mask);
2495 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2496
3fcaf2e5 2497 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2498 bp->attn_state |= asserted;
3fcaf2e5 2499 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2500
2501 if (asserted & ATTN_HARD_WIRED_MASK) {
2502 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2503
a5e9a7cf
EG
2504 bnx2x_acquire_phy_lock(bp);
2505
877e9aa4 2506 /* save nig interrupt mask */
87942b46 2507 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2508 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2509
c18487ee 2510 bnx2x_link_attn(bp);
a2fbb9ea
ET
2511
2512 /* handle unicore attn? */
2513 }
2514 if (asserted & ATTN_SW_TIMER_4_FUNC)
2515 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2516
2517 if (asserted & GPIO_2_FUNC)
2518 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2519
2520 if (asserted & GPIO_3_FUNC)
2521 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2522
2523 if (asserted & GPIO_4_FUNC)
2524 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2525
2526 if (port == 0) {
2527 if (asserted & ATTN_GENERAL_ATTN_1) {
2528 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2529 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2530 }
2531 if (asserted & ATTN_GENERAL_ATTN_2) {
2532 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2533 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2534 }
2535 if (asserted & ATTN_GENERAL_ATTN_3) {
2536 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2537 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2538 }
2539 } else {
2540 if (asserted & ATTN_GENERAL_ATTN_4) {
2541 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2542 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2543 }
2544 if (asserted & ATTN_GENERAL_ATTN_5) {
2545 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2546 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2547 }
2548 if (asserted & ATTN_GENERAL_ATTN_6) {
2549 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2550 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2551 }
2552 }
2553
2554 } /* if hardwired */
2555
5c862848
EG
2556 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2557 asserted, hc_addr);
2558 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2559
2560 /* now set back the mask */
a5e9a7cf 2561 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2562 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2563 bnx2x_release_phy_lock(bp);
2564 }
a2fbb9ea
ET
2565}
2566
877e9aa4 2567static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2568{
34f80b04 2569 int port = BP_PORT(bp);
877e9aa4
ET
2570 int reg_offset;
2571 u32 val;
2572
34f80b04
EG
2573 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2574 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2575
34f80b04 2576 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2577
2578 val = REG_RD(bp, reg_offset);
2579 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2580 REG_WR(bp, reg_offset, val);
2581
2582 BNX2X_ERR("SPIO5 hw attention\n");
2583
35b19ba5
EG
2584 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2585 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
877e9aa4
ET
2586 /* Fan failure attention */
2587
17de50b7 2588 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2589 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2590 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2591 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2592 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2593 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2594 /* mark the failure */
c18487ee 2595 bp->link_params.ext_phy_config &=
877e9aa4 2596 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2597 bp->link_params.ext_phy_config |=
877e9aa4
ET
2598 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2599 SHMEM_WR(bp,
2600 dev_info.port_hw_config[port].
2601 external_phy_config,
c18487ee 2602 bp->link_params.ext_phy_config);
877e9aa4
ET
2603 /* log the failure */
2604 printk(KERN_ERR PFX "Fan Failure on Network"
2605 " Controller %s has caused the driver to"
2606 " shutdown the card to prevent permanent"
2607 " damage. Please contact Dell Support for"
2608 " assistance\n", bp->dev->name);
2609 break;
2610
2611 default:
2612 break;
2613 }
2614 }
34f80b04 2615
589abe3a
EG
2616 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2617 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2618 bnx2x_acquire_phy_lock(bp);
2619 bnx2x_handle_module_detect_int(&bp->link_params);
2620 bnx2x_release_phy_lock(bp);
2621 }
2622
34f80b04
EG
2623 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2624
2625 val = REG_RD(bp, reg_offset);
2626 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2627 REG_WR(bp, reg_offset, val);
2628
2629 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2630 (attn & HW_INTERRUT_ASSERT_SET_0));
2631 bnx2x_panic();
2632 }
877e9aa4
ET
2633}
2634
2635static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2636{
2637 u32 val;
2638
2639 if (attn & BNX2X_DOORQ_ASSERT) {
2640
2641 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2642 BNX2X_ERR("DB hw attention 0x%x\n", val);
2643 /* DORQ discard attention */
2644 if (val & 0x2)
2645 BNX2X_ERR("FATAL error from DORQ\n");
2646 }
34f80b04
EG
2647
2648 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2649
2650 int port = BP_PORT(bp);
2651 int reg_offset;
2652
2653 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2654 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2655
2656 val = REG_RD(bp, reg_offset);
2657 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2658 REG_WR(bp, reg_offset, val);
2659
2660 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2661 (attn & HW_INTERRUT_ASSERT_SET_1));
2662 bnx2x_panic();
2663 }
877e9aa4
ET
2664}
2665
2666static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2667{
2668 u32 val;
2669
2670 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2671
2672 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2673 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2674 /* CFC error attention */
2675 if (val & 0x2)
2676 BNX2X_ERR("FATAL error from CFC\n");
2677 }
2678
2679 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2680
2681 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2682 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2683 /* RQ_USDMDP_FIFO_OVERFLOW */
2684 if (val & 0x18000)
2685 BNX2X_ERR("FATAL error from PXP\n");
2686 }
34f80b04
EG
2687
2688 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2689
2690 int port = BP_PORT(bp);
2691 int reg_offset;
2692
2693 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2694 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2695
2696 val = REG_RD(bp, reg_offset);
2697 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2698 REG_WR(bp, reg_offset, val);
2699
2700 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2701 (attn & HW_INTERRUT_ASSERT_SET_2));
2702 bnx2x_panic();
2703 }
877e9aa4
ET
2704}
2705
2706static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2707{
34f80b04
EG
2708 u32 val;
2709
877e9aa4
ET
2710 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2711
34f80b04
EG
2712 if (attn & BNX2X_PMF_LINK_ASSERT) {
2713 int func = BP_FUNC(bp);
2714
2715 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2716 bnx2x__link_status_update(bp);
2717 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2718 DRV_STATUS_PMF)
2719 bnx2x_pmf_update(bp);
2720
2721 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2722
2723 BNX2X_ERR("MC assert!\n");
2724 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2725 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2726 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2727 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2728 bnx2x_panic();
2729
2730 } else if (attn & BNX2X_MCP_ASSERT) {
2731
2732 BNX2X_ERR("MCP assert!\n");
2733 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2734 bnx2x_fw_dump(bp);
877e9aa4
ET
2735
2736 } else
2737 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2738 }
2739
2740 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2741 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2742 if (attn & BNX2X_GRC_TIMEOUT) {
2743 val = CHIP_IS_E1H(bp) ?
2744 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2745 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2746 }
2747 if (attn & BNX2X_GRC_RSV) {
2748 val = CHIP_IS_E1H(bp) ?
2749 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2750 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2751 }
877e9aa4 2752 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2753 }
2754}
2755
2756static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2757{
a2fbb9ea
ET
2758 struct attn_route attn;
2759 struct attn_route group_mask;
34f80b04 2760 int port = BP_PORT(bp);
877e9aa4 2761 int index;
a2fbb9ea
ET
2762 u32 reg_addr;
2763 u32 val;
3fcaf2e5 2764 u32 aeu_mask;
a2fbb9ea
ET
2765
2766 /* need to take HW lock because MCP or other port might also
2767 try to handle this event */
4a37fb66 2768 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2769
2770 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2771 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2772 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2773 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2774 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2775 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2776
2777 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2778 if (deasserted & (1 << index)) {
2779 group_mask = bp->attn_group[index];
2780
34f80b04
EG
2781 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2782 index, group_mask.sig[0], group_mask.sig[1],
2783 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2784
877e9aa4
ET
2785 bnx2x_attn_int_deasserted3(bp,
2786 attn.sig[3] & group_mask.sig[3]);
2787 bnx2x_attn_int_deasserted1(bp,
2788 attn.sig[1] & group_mask.sig[1]);
2789 bnx2x_attn_int_deasserted2(bp,
2790 attn.sig[2] & group_mask.sig[2]);
2791 bnx2x_attn_int_deasserted0(bp,
2792 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2793
a2fbb9ea
ET
2794 if ((attn.sig[0] & group_mask.sig[0] &
2795 HW_PRTY_ASSERT_SET_0) ||
2796 (attn.sig[1] & group_mask.sig[1] &
2797 HW_PRTY_ASSERT_SET_1) ||
2798 (attn.sig[2] & group_mask.sig[2] &
2799 HW_PRTY_ASSERT_SET_2))
6378c025 2800 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2801 }
2802 }
2803
4a37fb66 2804 bnx2x_release_alr(bp);
a2fbb9ea 2805
5c862848 2806 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2807
2808 val = ~deasserted;
3fcaf2e5
EG
2809 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2810 val, reg_addr);
5c862848 2811 REG_WR(bp, reg_addr, val);
a2fbb9ea 2812
a2fbb9ea 2813 if (~bp->attn_state & deasserted)
3fcaf2e5 2814 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2815
2816 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2817 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2818
3fcaf2e5
EG
2819 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2820 aeu_mask = REG_RD(bp, reg_addr);
2821
2822 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2823 aeu_mask, deasserted);
2824 aeu_mask |= (deasserted & 0xff);
2825 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2826
3fcaf2e5
EG
2827 REG_WR(bp, reg_addr, aeu_mask);
2828 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2829
2830 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2831 bp->attn_state &= ~deasserted;
2832 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2833}
2834
2835static void bnx2x_attn_int(struct bnx2x *bp)
2836{
2837 /* read local copy of bits */
68d59484
EG
2838 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2839 attn_bits);
2840 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2841 attn_bits_ack);
a2fbb9ea
ET
2842 u32 attn_state = bp->attn_state;
2843
2844 /* look for changed bits */
2845 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2846 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2847
2848 DP(NETIF_MSG_HW,
2849 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2850 attn_bits, attn_ack, asserted, deasserted);
2851
2852 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2853 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2854
2855 /* handle bits that were raised */
2856 if (asserted)
2857 bnx2x_attn_int_asserted(bp, asserted);
2858
2859 if (deasserted)
2860 bnx2x_attn_int_deasserted(bp, deasserted);
2861}
2862
2863static void bnx2x_sp_task(struct work_struct *work)
2864{
1cf167f2 2865 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2866 u16 status;
2867
34f80b04 2868
a2fbb9ea
ET
2869 /* Return here if interrupt is disabled */
2870 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2871 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2872 return;
2873 }
2874
2875 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2876/* if (status == 0) */
2877/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2878
3196a88a 2879 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2880
877e9aa4
ET
2881 /* HW attentions */
2882 if (status & 0x1)
a2fbb9ea 2883 bnx2x_attn_int(bp);
a2fbb9ea 2884
68d59484 2885 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2886 IGU_INT_NOP, 1);
2887 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2888 IGU_INT_NOP, 1);
2889 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2890 IGU_INT_NOP, 1);
2891 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2892 IGU_INT_NOP, 1);
2893 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2894 IGU_INT_ENABLE, 1);
877e9aa4 2895
a2fbb9ea
ET
2896}
2897
2898static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2899{
2900 struct net_device *dev = dev_instance;
2901 struct bnx2x *bp = netdev_priv(dev);
2902
2903 /* Return here if interrupt is disabled */
2904 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2905 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2906 return IRQ_HANDLED;
2907 }
2908
8d9c5f34 2909 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2910
2911#ifdef BNX2X_STOP_ON_ERROR
2912 if (unlikely(bp->panic))
2913 return IRQ_HANDLED;
2914#endif
2915
1cf167f2 2916 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2917
2918 return IRQ_HANDLED;
2919}
2920
2921/* end of slow path */
2922
2923/* Statistics */
2924
2925/****************************************************************************
2926* Macros
2927****************************************************************************/
2928
a2fbb9ea
ET
2929/* sum[hi:lo] += add[hi:lo] */
2930#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2931 do { \
2932 s_lo += a_lo; \
f5ba6772 2933 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2934 } while (0)
2935
2936/* difference = minuend - subtrahend */
2937#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2938 do { \
bb2a0f7a
YG
2939 if (m_lo < s_lo) { \
2940 /* underflow */ \
a2fbb9ea 2941 d_hi = m_hi - s_hi; \
bb2a0f7a 2942 if (d_hi > 0) { \
6378c025 2943 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2944 d_hi--; \
2945 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2946 } else { \
6378c025 2947 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2948 d_hi = 0; \
2949 d_lo = 0; \
2950 } \
bb2a0f7a
YG
2951 } else { \
2952 /* m_lo >= s_lo */ \
a2fbb9ea 2953 if (m_hi < s_hi) { \
bb2a0f7a
YG
2954 d_hi = 0; \
2955 d_lo = 0; \
2956 } else { \
6378c025 2957 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2958 d_hi = m_hi - s_hi; \
2959 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2960 } \
2961 } \
2962 } while (0)
2963
bb2a0f7a 2964#define UPDATE_STAT64(s, t) \
a2fbb9ea 2965 do { \
bb2a0f7a
YG
2966 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2967 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2968 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2969 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2970 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2971 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2972 } while (0)
2973
bb2a0f7a 2974#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2975 do { \
bb2a0f7a
YG
2976 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2977 diff.lo, new->s##_lo, old->s##_lo); \
2978 ADD_64(estats->t##_hi, diff.hi, \
2979 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2980 } while (0)
2981
2982/* sum[hi:lo] += add */
2983#define ADD_EXTEND_64(s_hi, s_lo, a) \
2984 do { \
2985 s_lo += a; \
2986 s_hi += (s_lo < a) ? 1 : 0; \
2987 } while (0)
2988
bb2a0f7a 2989#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2990 do { \
bb2a0f7a
YG
2991 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2992 pstats->mac_stx[1].s##_lo, \
2993 new->s); \
a2fbb9ea
ET
2994 } while (0)
2995
bb2a0f7a 2996#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2997 do { \
2998 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2999 old_tclient->s = le32_to_cpu(tclient->s); \
de832a55
EG
3000 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3001 } while (0)
3002
3003#define UPDATE_EXTEND_USTAT(s, t) \
3004 do { \
3005 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3006 old_uclient->s = uclient->s; \
3007 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3008 } while (0)
3009
3010#define UPDATE_EXTEND_XSTAT(s, t) \
3011 do { \
3012 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3013 old_xclient->s = le32_to_cpu(xclient->s); \
de832a55
EG
3014 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3015 } while (0)
3016
3017/* minuend -= subtrahend */
3018#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3019 do { \
3020 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3021 } while (0)
3022
3023/* minuend[hi:lo] -= subtrahend */
3024#define SUB_EXTEND_64(m_hi, m_lo, s) \
3025 do { \
3026 SUB_64(m_hi, 0, m_lo, s); \
3027 } while (0)
3028
3029#define SUB_EXTEND_USTAT(s, t) \
3030 do { \
3031 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3032 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3033 } while (0)
3034
3035/*
3036 * General service functions
3037 */
3038
3039static inline long bnx2x_hilo(u32 *hiref)
3040{
3041 u32 lo = *(hiref + 1);
3042#if (BITS_PER_LONG == 64)
3043 u32 hi = *hiref;
3044
3045 return HILO_U64(hi, lo);
3046#else
3047 return lo;
3048#endif
3049}
3050
3051/*
3052 * Init service functions
3053 */
3054
bb2a0f7a
YG
3055static void bnx2x_storm_stats_post(struct bnx2x *bp)
3056{
3057 if (!bp->stats_pending) {
3058 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3059 int i, rc;
bb2a0f7a
YG
3060
3061 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3062 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3063 for_each_queue(bp, i)
3064 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3065
3066 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3067 ((u32 *)&ramrod_data)[1],
3068 ((u32 *)&ramrod_data)[0], 0);
3069 if (rc == 0) {
3070 /* stats ramrod has it's own slot on the spq */
3071 bp->spq_left++;
3072 bp->stats_pending = 1;
3073 }
3074 }
3075}
3076
3077static void bnx2x_stats_init(struct bnx2x *bp)
3078{
3079 int port = BP_PORT(bp);
de832a55 3080 int i;
bb2a0f7a 3081
de832a55 3082 bp->stats_pending = 0;
bb2a0f7a
YG
3083 bp->executer_idx = 0;
3084 bp->stats_counter = 0;
3085
3086 /* port stats */
3087 if (!BP_NOMCP(bp))
3088 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3089 else
3090 bp->port.port_stx = 0;
3091 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3092
3093 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3094 bp->port.old_nig_stats.brb_discard =
3095 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3096 bp->port.old_nig_stats.brb_truncate =
3097 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3098 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3099 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3100 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3101 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3102
3103 /* function stats */
de832a55
EG
3104 for_each_queue(bp, i) {
3105 struct bnx2x_fastpath *fp = &bp->fp[i];
3106
3107 memset(&fp->old_tclient, 0,
3108 sizeof(struct tstorm_per_client_stats));
3109 memset(&fp->old_uclient, 0,
3110 sizeof(struct ustorm_per_client_stats));
3111 memset(&fp->old_xclient, 0,
3112 sizeof(struct xstorm_per_client_stats));
3113 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3114 }
3115
bb2a0f7a 3116 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3117 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3118
3119 bp->stats_state = STATS_STATE_DISABLED;
3120 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3121 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3122}
3123
3124static void bnx2x_hw_stats_post(struct bnx2x *bp)
3125{
3126 struct dmae_command *dmae = &bp->stats_dmae;
3127 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3128
3129 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3130 if (CHIP_REV_IS_SLOW(bp))
3131 return;
bb2a0f7a
YG
3132
3133 /* loader */
3134 if (bp->executer_idx) {
3135 int loader_idx = PMF_DMAE_C(bp);
3136
3137 memset(dmae, 0, sizeof(struct dmae_command));
3138
3139 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3140 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3141 DMAE_CMD_DST_RESET |
3142#ifdef __BIG_ENDIAN
3143 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3144#else
3145 DMAE_CMD_ENDIANITY_DW_SWAP |
3146#endif
3147 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3148 DMAE_CMD_PORT_0) |
3149 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3150 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3151 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3152 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3153 sizeof(struct dmae_command) *
3154 (loader_idx + 1)) >> 2;
3155 dmae->dst_addr_hi = 0;
3156 dmae->len = sizeof(struct dmae_command) >> 2;
3157 if (CHIP_IS_E1(bp))
3158 dmae->len--;
3159 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3160 dmae->comp_addr_hi = 0;
3161 dmae->comp_val = 1;
3162
3163 *stats_comp = 0;
3164 bnx2x_post_dmae(bp, dmae, loader_idx);
3165
3166 } else if (bp->func_stx) {
3167 *stats_comp = 0;
3168 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3169 }
3170}
3171
3172static int bnx2x_stats_comp(struct bnx2x *bp)
3173{
3174 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3175 int cnt = 10;
3176
3177 might_sleep();
3178 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3179 if (!cnt) {
3180 BNX2X_ERR("timeout waiting for stats finished\n");
3181 break;
3182 }
3183 cnt--;
12469401 3184 msleep(1);
bb2a0f7a
YG
3185 }
3186 return 1;
3187}
3188
3189/*
3190 * Statistics service functions
3191 */
3192
3193static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3194{
3195 struct dmae_command *dmae;
3196 u32 opcode;
3197 int loader_idx = PMF_DMAE_C(bp);
3198 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3199
3200 /* sanity */
3201 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3202 BNX2X_ERR("BUG!\n");
3203 return;
3204 }
3205
3206 bp->executer_idx = 0;
3207
3208 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3209 DMAE_CMD_C_ENABLE |
3210 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3211#ifdef __BIG_ENDIAN
3212 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3213#else
3214 DMAE_CMD_ENDIANITY_DW_SWAP |
3215#endif
3216 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3217 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3218
3219 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3220 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3221 dmae->src_addr_lo = bp->port.port_stx >> 2;
3222 dmae->src_addr_hi = 0;
3223 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3224 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3225 dmae->len = DMAE_LEN32_RD_MAX;
3226 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3227 dmae->comp_addr_hi = 0;
3228 dmae->comp_val = 1;
3229
3230 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3232 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3233 dmae->src_addr_hi = 0;
7a9b2557
VZ
3234 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3235 DMAE_LEN32_RD_MAX * 4);
3236 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3237 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3238 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3239 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3240 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3241 dmae->comp_val = DMAE_COMP_VAL;
3242
3243 *stats_comp = 0;
3244 bnx2x_hw_stats_post(bp);
3245 bnx2x_stats_comp(bp);
3246}
3247
3248static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3249{
3250 struct dmae_command *dmae;
34f80b04 3251 int port = BP_PORT(bp);
bb2a0f7a 3252 int vn = BP_E1HVN(bp);
a2fbb9ea 3253 u32 opcode;
bb2a0f7a 3254 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3255 u32 mac_addr;
bb2a0f7a
YG
3256 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3257
3258 /* sanity */
3259 if (!bp->link_vars.link_up || !bp->port.pmf) {
3260 BNX2X_ERR("BUG!\n");
3261 return;
3262 }
a2fbb9ea
ET
3263
3264 bp->executer_idx = 0;
bb2a0f7a
YG
3265
3266 /* MCP */
3267 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3268 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3269 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3270#ifdef __BIG_ENDIAN
bb2a0f7a 3271 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3272#else
bb2a0f7a 3273 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3274#endif
bb2a0f7a
YG
3275 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3276 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3277
bb2a0f7a 3278 if (bp->port.port_stx) {
a2fbb9ea
ET
3279
3280 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3281 dmae->opcode = opcode;
bb2a0f7a
YG
3282 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3283 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3284 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3285 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3286 dmae->len = sizeof(struct host_port_stats) >> 2;
3287 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3288 dmae->comp_addr_hi = 0;
3289 dmae->comp_val = 1;
a2fbb9ea
ET
3290 }
3291
bb2a0f7a
YG
3292 if (bp->func_stx) {
3293
3294 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3295 dmae->opcode = opcode;
3296 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3297 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3298 dmae->dst_addr_lo = bp->func_stx >> 2;
3299 dmae->dst_addr_hi = 0;
3300 dmae->len = sizeof(struct host_func_stats) >> 2;
3301 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3302 dmae->comp_addr_hi = 0;
3303 dmae->comp_val = 1;
a2fbb9ea
ET
3304 }
3305
bb2a0f7a 3306 /* MAC */
a2fbb9ea
ET
3307 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3308 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3309 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3310#ifdef __BIG_ENDIAN
3311 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3312#else
3313 DMAE_CMD_ENDIANITY_DW_SWAP |
3314#endif
bb2a0f7a
YG
3315 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3316 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3317
c18487ee 3318 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3319
3320 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3321 NIG_REG_INGRESS_BMAC0_MEM);
3322
3323 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3324 BIGMAC_REGISTER_TX_STAT_GTBYT */
3325 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3326 dmae->opcode = opcode;
3327 dmae->src_addr_lo = (mac_addr +
3328 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3329 dmae->src_addr_hi = 0;
3330 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3331 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3332 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3333 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3334 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3335 dmae->comp_addr_hi = 0;
3336 dmae->comp_val = 1;
3337
3338 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3339 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3340 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3341 dmae->opcode = opcode;
3342 dmae->src_addr_lo = (mac_addr +
3343 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3344 dmae->src_addr_hi = 0;
3345 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3346 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3347 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3348 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3349 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3350 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3351 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3352 dmae->comp_addr_hi = 0;
3353 dmae->comp_val = 1;
3354
c18487ee 3355 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3356
3357 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3358
3359 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3360 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3361 dmae->opcode = opcode;
3362 dmae->src_addr_lo = (mac_addr +
3363 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3364 dmae->src_addr_hi = 0;
3365 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3366 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3367 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3368 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3369 dmae->comp_addr_hi = 0;
3370 dmae->comp_val = 1;
3371
3372 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3373 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374 dmae->opcode = opcode;
3375 dmae->src_addr_lo = (mac_addr +
3376 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3377 dmae->src_addr_hi = 0;
3378 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3379 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3380 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3381 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3382 dmae->len = 1;
3383 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3384 dmae->comp_addr_hi = 0;
3385 dmae->comp_val = 1;
3386
3387 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3388 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3389 dmae->opcode = opcode;
3390 dmae->src_addr_lo = (mac_addr +
3391 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3392 dmae->src_addr_hi = 0;
3393 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3394 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3395 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3396 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3397 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3398 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3399 dmae->comp_addr_hi = 0;
3400 dmae->comp_val = 1;
3401 }
3402
3403 /* NIG */
bb2a0f7a
YG
3404 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3405 dmae->opcode = opcode;
3406 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3407 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3408 dmae->src_addr_hi = 0;
3409 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3410 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3411 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3412 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3413 dmae->comp_addr_hi = 0;
3414 dmae->comp_val = 1;
3415
3416 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3417 dmae->opcode = opcode;
3418 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3419 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3420 dmae->src_addr_hi = 0;
3421 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3422 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3423 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3424 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3425 dmae->len = (2*sizeof(u32)) >> 2;
3426 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3427 dmae->comp_addr_hi = 0;
3428 dmae->comp_val = 1;
3429
a2fbb9ea
ET
3430 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3431 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3432 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3433 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3434#ifdef __BIG_ENDIAN
3435 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3436#else
3437 DMAE_CMD_ENDIANITY_DW_SWAP |
3438#endif
bb2a0f7a
YG
3439 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3440 (vn << DMAE_CMD_E1HVN_SHIFT));
3441 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3442 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3443 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3445 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3446 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3447 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3448 dmae->len = (2*sizeof(u32)) >> 2;
3449 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3450 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3451 dmae->comp_val = DMAE_COMP_VAL;
3452
3453 *stats_comp = 0;
a2fbb9ea
ET
3454}
3455
bb2a0f7a 3456static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3457{
bb2a0f7a
YG
3458 struct dmae_command *dmae = &bp->stats_dmae;
3459 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3460
bb2a0f7a
YG
3461 /* sanity */
3462 if (!bp->func_stx) {
3463 BNX2X_ERR("BUG!\n");
3464 return;
3465 }
a2fbb9ea 3466
bb2a0f7a
YG
3467 bp->executer_idx = 0;
3468 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3469
bb2a0f7a
YG
3470 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3471 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3472 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3473#ifdef __BIG_ENDIAN
3474 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3475#else
3476 DMAE_CMD_ENDIANITY_DW_SWAP |
3477#endif
3478 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3479 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3480 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3481 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3482 dmae->dst_addr_lo = bp->func_stx >> 2;
3483 dmae->dst_addr_hi = 0;
3484 dmae->len = sizeof(struct host_func_stats) >> 2;
3485 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3486 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3487 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3488
bb2a0f7a
YG
3489 *stats_comp = 0;
3490}
a2fbb9ea 3491
bb2a0f7a
YG
3492static void bnx2x_stats_start(struct bnx2x *bp)
3493{
3494 if (bp->port.pmf)
3495 bnx2x_port_stats_init(bp);
3496
3497 else if (bp->func_stx)
3498 bnx2x_func_stats_init(bp);
3499
3500 bnx2x_hw_stats_post(bp);
3501 bnx2x_storm_stats_post(bp);
3502}
3503
3504static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3505{
3506 bnx2x_stats_comp(bp);
3507 bnx2x_stats_pmf_update(bp);
3508 bnx2x_stats_start(bp);
3509}
3510
3511static void bnx2x_stats_restart(struct bnx2x *bp)
3512{
3513 bnx2x_stats_comp(bp);
3514 bnx2x_stats_start(bp);
3515}
3516
3517static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3518{
3519 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3520 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3521 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3522 struct regpair diff;
3523
3524 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3525 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3526 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3527 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3528 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3529 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3530 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3531 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3532 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3533 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3534 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3535 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3536 UPDATE_STAT64(tx_stat_gt127,
3537 tx_stat_etherstatspkts65octetsto127octets);
3538 UPDATE_STAT64(tx_stat_gt255,
3539 tx_stat_etherstatspkts128octetsto255octets);
3540 UPDATE_STAT64(tx_stat_gt511,
3541 tx_stat_etherstatspkts256octetsto511octets);
3542 UPDATE_STAT64(tx_stat_gt1023,
3543 tx_stat_etherstatspkts512octetsto1023octets);
3544 UPDATE_STAT64(tx_stat_gt1518,
3545 tx_stat_etherstatspkts1024octetsto1522octets);
3546 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3547 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3548 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3549 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3550 UPDATE_STAT64(tx_stat_gterr,
3551 tx_stat_dot3statsinternalmactransmiterrors);
3552 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3553
3554 estats->pause_frames_received_hi =
3555 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3556 estats->pause_frames_received_lo =
3557 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3558
3559 estats->pause_frames_sent_hi =
3560 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3561 estats->pause_frames_sent_lo =
3562 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3563}
3564
3565static void bnx2x_emac_stats_update(struct bnx2x *bp)
3566{
3567 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3568 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3569 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3570
3571 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3572 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3573 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3574 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3575 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3576 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3577 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3578 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3579 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3580 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3581 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3582 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3583 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3584 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3585 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3586 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3587 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3588 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3589 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3590 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3591 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3592 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3593 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3594 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3595 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3596 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3597 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3598 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3599 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3600 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3601 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3602
3603 estats->pause_frames_received_hi =
3604 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3605 estats->pause_frames_received_lo =
3606 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3607 ADD_64(estats->pause_frames_received_hi,
3608 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3609 estats->pause_frames_received_lo,
3610 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3611
3612 estats->pause_frames_sent_hi =
3613 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3614 estats->pause_frames_sent_lo =
3615 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3616 ADD_64(estats->pause_frames_sent_hi,
3617 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3618 estats->pause_frames_sent_lo,
3619 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3620}
3621
3622static int bnx2x_hw_stats_update(struct bnx2x *bp)
3623{
3624 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3625 struct nig_stats *old = &(bp->port.old_nig_stats);
3626 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3627 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3628 struct regpair diff;
de832a55 3629 u32 nig_timer_max;
bb2a0f7a
YG
3630
3631 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3632 bnx2x_bmac_stats_update(bp);
3633
3634 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3635 bnx2x_emac_stats_update(bp);
3636
3637 else { /* unreached */
3638 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3639 return -1;
3640 }
a2fbb9ea 3641
bb2a0f7a
YG
3642 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3643 new->brb_discard - old->brb_discard);
66e855f3
YG
3644 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3645 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3646
bb2a0f7a
YG
3647 UPDATE_STAT64_NIG(egress_mac_pkt0,
3648 etherstatspkts1024octetsto1522octets);
3649 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3650
bb2a0f7a 3651 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3652
bb2a0f7a
YG
3653 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3654 sizeof(struct mac_stx));
3655 estats->brb_drop_hi = pstats->brb_drop_hi;
3656 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3657
bb2a0f7a 3658 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3659
de832a55
EG
3660 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3661 if (nig_timer_max != estats->nig_timer_max) {
3662 estats->nig_timer_max = nig_timer_max;
3663 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3664 }
3665
bb2a0f7a 3666 return 0;
a2fbb9ea
ET
3667}
3668
bb2a0f7a 3669static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3670{
3671 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3672 struct tstorm_per_port_stats *tport =
de832a55 3673 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3674 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3675 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3676 int i;
3677
3678 memset(&(fstats->total_bytes_received_hi), 0,
3679 sizeof(struct host_func_stats) - 2*sizeof(u32));
3680 estats->error_bytes_received_hi = 0;
3681 estats->error_bytes_received_lo = 0;
3682 estats->etherstatsoverrsizepkts_hi = 0;
3683 estats->etherstatsoverrsizepkts_lo = 0;
3684 estats->no_buff_discard_hi = 0;
3685 estats->no_buff_discard_lo = 0;
a2fbb9ea 3686
de832a55
EG
3687 for_each_queue(bp, i) {
3688 struct bnx2x_fastpath *fp = &bp->fp[i];
3689 int cl_id = fp->cl_id;
3690 struct tstorm_per_client_stats *tclient =
3691 &stats->tstorm_common.client_statistics[cl_id];
3692 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3693 struct ustorm_per_client_stats *uclient =
3694 &stats->ustorm_common.client_statistics[cl_id];
3695 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3696 struct xstorm_per_client_stats *xclient =
3697 &stats->xstorm_common.client_statistics[cl_id];
3698 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3699 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3700 u32 diff;
3701
3702 /* are storm stats valid? */
3703 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3704 bp->stats_counter) {
de832a55
EG
3705 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3706 " xstorm counter (%d) != stats_counter (%d)\n",
3707 i, xclient->stats_counter, bp->stats_counter);
3708 return -1;
3709 }
3710 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3711 bp->stats_counter) {
de832a55
EG
3712 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3713 " tstorm counter (%d) != stats_counter (%d)\n",
3714 i, tclient->stats_counter, bp->stats_counter);
3715 return -2;
3716 }
3717 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3718 bp->stats_counter) {
3719 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3720 " ustorm counter (%d) != stats_counter (%d)\n",
3721 i, uclient->stats_counter, bp->stats_counter);
3722 return -4;
3723 }
a2fbb9ea 3724
de832a55
EG
3725 qstats->total_bytes_received_hi =
3726 qstats->valid_bytes_received_hi =
a2fbb9ea 3727 le32_to_cpu(tclient->total_rcv_bytes.hi);
de832a55
EG
3728 qstats->total_bytes_received_lo =
3729 qstats->valid_bytes_received_lo =
a2fbb9ea 3730 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3731
de832a55 3732 qstats->error_bytes_received_hi =
bb2a0f7a 3733 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3734 qstats->error_bytes_received_lo =
bb2a0f7a 3735 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3736
de832a55
EG
3737 ADD_64(qstats->total_bytes_received_hi,
3738 qstats->error_bytes_received_hi,
3739 qstats->total_bytes_received_lo,
3740 qstats->error_bytes_received_lo);
3741
3742 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3743 total_unicast_packets_received);
3744 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3745 total_multicast_packets_received);
3746 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3747 total_broadcast_packets_received);
3748 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3749 etherstatsoverrsizepkts);
3750 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3751
3752 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3753 total_unicast_packets_received);
3754 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3755 total_multicast_packets_received);
3756 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3757 total_broadcast_packets_received);
3758 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3759 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3760 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3761
3762 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3763 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3764 qstats->total_bytes_transmitted_lo =
bb2a0f7a
YG
3765 le32_to_cpu(xclient->total_sent_bytes.lo);
3766
de832a55
EG
3767 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3768 total_unicast_packets_transmitted);
3769 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3770 total_multicast_packets_transmitted);
3771 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3772 total_broadcast_packets_transmitted);
3773
3774 old_tclient->checksum_discard = tclient->checksum_discard;
3775 old_tclient->ttl0_discard = tclient->ttl0_discard;
3776
3777 ADD_64(fstats->total_bytes_received_hi,
3778 qstats->total_bytes_received_hi,
3779 fstats->total_bytes_received_lo,
3780 qstats->total_bytes_received_lo);
3781 ADD_64(fstats->total_bytes_transmitted_hi,
3782 qstats->total_bytes_transmitted_hi,
3783 fstats->total_bytes_transmitted_lo,
3784 qstats->total_bytes_transmitted_lo);
3785 ADD_64(fstats->total_unicast_packets_received_hi,
3786 qstats->total_unicast_packets_received_hi,
3787 fstats->total_unicast_packets_received_lo,
3788 qstats->total_unicast_packets_received_lo);
3789 ADD_64(fstats->total_multicast_packets_received_hi,
3790 qstats->total_multicast_packets_received_hi,
3791 fstats->total_multicast_packets_received_lo,
3792 qstats->total_multicast_packets_received_lo);
3793 ADD_64(fstats->total_broadcast_packets_received_hi,
3794 qstats->total_broadcast_packets_received_hi,
3795 fstats->total_broadcast_packets_received_lo,
3796 qstats->total_broadcast_packets_received_lo);
3797 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3798 qstats->total_unicast_packets_transmitted_hi,
3799 fstats->total_unicast_packets_transmitted_lo,
3800 qstats->total_unicast_packets_transmitted_lo);
3801 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3802 qstats->total_multicast_packets_transmitted_hi,
3803 fstats->total_multicast_packets_transmitted_lo,
3804 qstats->total_multicast_packets_transmitted_lo);
3805 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3806 qstats->total_broadcast_packets_transmitted_hi,
3807 fstats->total_broadcast_packets_transmitted_lo,
3808 qstats->total_broadcast_packets_transmitted_lo);
3809 ADD_64(fstats->valid_bytes_received_hi,
3810 qstats->valid_bytes_received_hi,
3811 fstats->valid_bytes_received_lo,
3812 qstats->valid_bytes_received_lo);
3813
3814 ADD_64(estats->error_bytes_received_hi,
3815 qstats->error_bytes_received_hi,
3816 estats->error_bytes_received_lo,
3817 qstats->error_bytes_received_lo);
3818 ADD_64(estats->etherstatsoverrsizepkts_hi,
3819 qstats->etherstatsoverrsizepkts_hi,
3820 estats->etherstatsoverrsizepkts_lo,
3821 qstats->etherstatsoverrsizepkts_lo);
3822 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3823 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3824 }
3825
3826 ADD_64(fstats->total_bytes_received_hi,
3827 estats->rx_stat_ifhcinbadoctets_hi,
3828 fstats->total_bytes_received_lo,
3829 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
3830
3831 memcpy(estats, &(fstats->total_bytes_received_hi),
3832 sizeof(struct host_func_stats) - 2*sizeof(u32));
3833
de832a55
EG
3834 ADD_64(estats->etherstatsoverrsizepkts_hi,
3835 estats->rx_stat_dot3statsframestoolong_hi,
3836 estats->etherstatsoverrsizepkts_lo,
3837 estats->rx_stat_dot3statsframestoolong_lo);
3838 ADD_64(estats->error_bytes_received_hi,
3839 estats->rx_stat_ifhcinbadoctets_hi,
3840 estats->error_bytes_received_lo,
3841 estats->rx_stat_ifhcinbadoctets_lo);
3842
3843 if (bp->port.pmf) {
3844 estats->mac_filter_discard =
3845 le32_to_cpu(tport->mac_filter_discard);
3846 estats->xxoverflow_discard =
3847 le32_to_cpu(tport->xxoverflow_discard);
3848 estats->brb_truncate_discard =
bb2a0f7a 3849 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
3850 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3851 }
bb2a0f7a
YG
3852
3853 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3854
de832a55
EG
3855 bp->stats_pending = 0;
3856
a2fbb9ea
ET
3857 return 0;
3858}
3859
bb2a0f7a 3860static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3861{
bb2a0f7a 3862 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3863 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3864 int i;
a2fbb9ea
ET
3865
3866 nstats->rx_packets =
3867 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3868 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3869 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3870
3871 nstats->tx_packets =
3872 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3873 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3874 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3875
de832a55 3876 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3877
0e39e645 3878 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3879
de832a55
EG
3880 nstats->rx_dropped = estats->mac_discard;
3881 for_each_queue(bp, i)
3882 nstats->rx_dropped +=
3883 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3884
a2fbb9ea
ET
3885 nstats->tx_dropped = 0;
3886
3887 nstats->multicast =
de832a55 3888 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3889
bb2a0f7a 3890 nstats->collisions =
de832a55 3891 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
3892
3893 nstats->rx_length_errors =
de832a55
EG
3894 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3895 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3896 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3897 bnx2x_hilo(&estats->brb_truncate_hi);
3898 nstats->rx_crc_errors =
3899 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3900 nstats->rx_frame_errors =
3901 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3902 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
3903 nstats->rx_missed_errors = estats->xxoverflow_discard;
3904
3905 nstats->rx_errors = nstats->rx_length_errors +
3906 nstats->rx_over_errors +
3907 nstats->rx_crc_errors +
3908 nstats->rx_frame_errors +
0e39e645
ET
3909 nstats->rx_fifo_errors +
3910 nstats->rx_missed_errors;
a2fbb9ea 3911
bb2a0f7a 3912 nstats->tx_aborted_errors =
de832a55
EG
3913 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3914 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3915 nstats->tx_carrier_errors =
3916 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
3917 nstats->tx_fifo_errors = 0;
3918 nstats->tx_heartbeat_errors = 0;
3919 nstats->tx_window_errors = 0;
3920
3921 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
3922 nstats->tx_carrier_errors +
3923 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3924}
3925
3926static void bnx2x_drv_stats_update(struct bnx2x *bp)
3927{
3928 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3929 int i;
3930
3931 estats->driver_xoff = 0;
3932 estats->rx_err_discard_pkt = 0;
3933 estats->rx_skb_alloc_failed = 0;
3934 estats->hw_csum_err = 0;
3935 for_each_queue(bp, i) {
3936 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3937
3938 estats->driver_xoff += qstats->driver_xoff;
3939 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3940 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3941 estats->hw_csum_err += qstats->hw_csum_err;
3942 }
a2fbb9ea
ET
3943}
3944
bb2a0f7a 3945static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3946{
bb2a0f7a 3947 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3948
bb2a0f7a
YG
3949 if (*stats_comp != DMAE_COMP_VAL)
3950 return;
3951
3952 if (bp->port.pmf)
de832a55 3953 bnx2x_hw_stats_update(bp);
a2fbb9ea 3954
de832a55
EG
3955 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3956 BNX2X_ERR("storm stats were not updated for 3 times\n");
3957 bnx2x_panic();
3958 return;
a2fbb9ea
ET
3959 }
3960
de832a55
EG
3961 bnx2x_net_stats_update(bp);
3962 bnx2x_drv_stats_update(bp);
3963
a2fbb9ea 3964 if (bp->msglevel & NETIF_MSG_TIMER) {
de832a55
EG
3965 struct tstorm_per_client_stats *old_tclient =
3966 &bp->fp->old_tclient;
3967 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 3968 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3969 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3970 int i;
a2fbb9ea
ET
3971
3972 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3973 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3974 " tx pkt (%lx)\n",
3975 bnx2x_tx_avail(bp->fp),
7a9b2557 3976 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3977 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3978 " rx pkt (%lx)\n",
7a9b2557
VZ
3979 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3980 bp->fp->rx_comp_cons),
3981 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
de832a55
EG
3982 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3983 "brb truncate %u\n",
3984 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3985 qstats->driver_xoff,
3986 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 3987 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 3988 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
3989 "mac_discard %u mac_filter_discard %u "
3990 "xxovrflow_discard %u brb_truncate_discard %u "
3991 "ttl0_discard %u\n",
bb2a0f7a 3992 old_tclient->checksum_discard,
de832a55
EG
3993 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3994 bnx2x_hilo(&qstats->no_buff_discard_hi),
3995 estats->mac_discard, estats->mac_filter_discard,
3996 estats->xxoverflow_discard, estats->brb_truncate_discard,
bb2a0f7a 3997 old_tclient->ttl0_discard);
a2fbb9ea
ET
3998
3999 for_each_queue(bp, i) {
4000 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4001 bnx2x_fp(bp, i, tx_pkt),
4002 bnx2x_fp(bp, i, rx_pkt),
4003 bnx2x_fp(bp, i, rx_calls));
4004 }
4005 }
4006
bb2a0f7a
YG
4007 bnx2x_hw_stats_post(bp);
4008 bnx2x_storm_stats_post(bp);
4009}
a2fbb9ea 4010
bb2a0f7a
YG
4011static void bnx2x_port_stats_stop(struct bnx2x *bp)
4012{
4013 struct dmae_command *dmae;
4014 u32 opcode;
4015 int loader_idx = PMF_DMAE_C(bp);
4016 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4017
bb2a0f7a 4018 bp->executer_idx = 0;
a2fbb9ea 4019
bb2a0f7a
YG
4020 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4021 DMAE_CMD_C_ENABLE |
4022 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4023#ifdef __BIG_ENDIAN
bb2a0f7a 4024 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4025#else
bb2a0f7a 4026 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4027#endif
bb2a0f7a
YG
4028 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4029 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4030
4031 if (bp->port.port_stx) {
4032
4033 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4034 if (bp->func_stx)
4035 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4036 else
4037 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4038 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4039 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4040 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4041 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4042 dmae->len = sizeof(struct host_port_stats) >> 2;
4043 if (bp->func_stx) {
4044 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4045 dmae->comp_addr_hi = 0;
4046 dmae->comp_val = 1;
4047 } else {
4048 dmae->comp_addr_lo =
4049 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4050 dmae->comp_addr_hi =
4051 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4052 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4053
bb2a0f7a
YG
4054 *stats_comp = 0;
4055 }
a2fbb9ea
ET
4056 }
4057
bb2a0f7a
YG
4058 if (bp->func_stx) {
4059
4060 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4061 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4062 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4063 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4064 dmae->dst_addr_lo = bp->func_stx >> 2;
4065 dmae->dst_addr_hi = 0;
4066 dmae->len = sizeof(struct host_func_stats) >> 2;
4067 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4068 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4069 dmae->comp_val = DMAE_COMP_VAL;
4070
4071 *stats_comp = 0;
a2fbb9ea 4072 }
bb2a0f7a
YG
4073}
4074
4075static void bnx2x_stats_stop(struct bnx2x *bp)
4076{
4077 int update = 0;
4078
4079 bnx2x_stats_comp(bp);
4080
4081 if (bp->port.pmf)
4082 update = (bnx2x_hw_stats_update(bp) == 0);
4083
4084 update |= (bnx2x_storm_stats_update(bp) == 0);
4085
4086 if (update) {
4087 bnx2x_net_stats_update(bp);
a2fbb9ea 4088
bb2a0f7a
YG
4089 if (bp->port.pmf)
4090 bnx2x_port_stats_stop(bp);
4091
4092 bnx2x_hw_stats_post(bp);
4093 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4094 }
4095}
4096
bb2a0f7a
YG
4097static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4098{
4099}
4100
4101static const struct {
4102 void (*action)(struct bnx2x *bp);
4103 enum bnx2x_stats_state next_state;
4104} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4105/* state event */
4106{
4107/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4108/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4109/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4110/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4111},
4112{
4113/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4114/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4115/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4116/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4117}
4118};
4119
4120static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4121{
4122 enum bnx2x_stats_state state = bp->stats_state;
4123
4124 bnx2x_stats_stm[state][event].action(bp);
4125 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4126
4127 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4128 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4129 state, event, bp->stats_state);
4130}
4131
a2fbb9ea
ET
4132static void bnx2x_timer(unsigned long data)
4133{
4134 struct bnx2x *bp = (struct bnx2x *) data;
4135
4136 if (!netif_running(bp->dev))
4137 return;
4138
4139 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4140 goto timer_restart;
a2fbb9ea
ET
4141
4142 if (poll) {
4143 struct bnx2x_fastpath *fp = &bp->fp[0];
4144 int rc;
4145
4146 bnx2x_tx_int(fp, 1000);
4147 rc = bnx2x_rx_int(fp, 1000);
4148 }
4149
34f80b04
EG
4150 if (!BP_NOMCP(bp)) {
4151 int func = BP_FUNC(bp);
a2fbb9ea
ET
4152 u32 drv_pulse;
4153 u32 mcp_pulse;
4154
4155 ++bp->fw_drv_pulse_wr_seq;
4156 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4157 /* TBD - add SYSTEM_TIME */
4158 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4159 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4160
34f80b04 4161 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4162 MCP_PULSE_SEQ_MASK);
4163 /* The delta between driver pulse and mcp response
4164 * should be 1 (before mcp response) or 0 (after mcp response)
4165 */
4166 if ((drv_pulse != mcp_pulse) &&
4167 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4168 /* someone lost a heartbeat... */
4169 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4170 drv_pulse, mcp_pulse);
4171 }
4172 }
4173
bb2a0f7a
YG
4174 if ((bp->state == BNX2X_STATE_OPEN) ||
4175 (bp->state == BNX2X_STATE_DISABLED))
4176 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4177
f1410647 4178timer_restart:
a2fbb9ea
ET
4179 mod_timer(&bp->timer, jiffies + bp->current_interval);
4180}
4181
4182/* end of Statistics */
4183
4184/* nic init */
4185
4186/*
4187 * nic init service functions
4188 */
4189
34f80b04 4190static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4191{
34f80b04
EG
4192 int port = BP_PORT(bp);
4193
4194 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4195 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4196 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4197 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4198 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4199 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4200}
4201
5c862848
EG
4202static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4203 dma_addr_t mapping, int sb_id)
34f80b04
EG
4204{
4205 int port = BP_PORT(bp);
bb2a0f7a 4206 int func = BP_FUNC(bp);
a2fbb9ea 4207 int index;
34f80b04 4208 u64 section;
a2fbb9ea
ET
4209
4210 /* USTORM */
4211 section = ((u64)mapping) + offsetof(struct host_status_block,
4212 u_status_block);
34f80b04 4213 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4214
4215 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4216 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4217 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4218 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4219 U64_HI(section));
bb2a0f7a
YG
4220 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4221 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4222
4223 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4224 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4225 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4226
4227 /* CSTORM */
4228 section = ((u64)mapping) + offsetof(struct host_status_block,
4229 c_status_block);
34f80b04 4230 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4231
4232 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4233 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4234 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4235 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4236 U64_HI(section));
7a9b2557
VZ
4237 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4238 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4239
4240 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4241 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4242 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4243
4244 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4245}
4246
4247static void bnx2x_zero_def_sb(struct bnx2x *bp)
4248{
4249 int func = BP_FUNC(bp);
a2fbb9ea 4250
34f80b04
EG
4251 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4252 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4253 sizeof(struct ustorm_def_status_block)/4);
4254 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4255 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4256 sizeof(struct cstorm_def_status_block)/4);
4257 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4258 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4259 sizeof(struct xstorm_def_status_block)/4);
4260 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4261 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4262 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4263}
4264
4265static void bnx2x_init_def_sb(struct bnx2x *bp,
4266 struct host_def_status_block *def_sb,
34f80b04 4267 dma_addr_t mapping, int sb_id)
a2fbb9ea 4268{
34f80b04
EG
4269 int port = BP_PORT(bp);
4270 int func = BP_FUNC(bp);
a2fbb9ea
ET
4271 int index, val, reg_offset;
4272 u64 section;
4273
4274 /* ATTN */
4275 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4276 atten_status_block);
34f80b04 4277 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4278
49d66772
ET
4279 bp->attn_state = 0;
4280
a2fbb9ea
ET
4281 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4282 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4283
34f80b04 4284 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4285 bp->attn_group[index].sig[0] = REG_RD(bp,
4286 reg_offset + 0x10*index);
4287 bp->attn_group[index].sig[1] = REG_RD(bp,
4288 reg_offset + 0x4 + 0x10*index);
4289 bp->attn_group[index].sig[2] = REG_RD(bp,
4290 reg_offset + 0x8 + 0x10*index);
4291 bp->attn_group[index].sig[3] = REG_RD(bp,
4292 reg_offset + 0xc + 0x10*index);
4293 }
4294
a2fbb9ea
ET
4295 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4296 HC_REG_ATTN_MSG0_ADDR_L);
4297
4298 REG_WR(bp, reg_offset, U64_LO(section));
4299 REG_WR(bp, reg_offset + 4, U64_HI(section));
4300
4301 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4302
4303 val = REG_RD(bp, reg_offset);
34f80b04 4304 val |= sb_id;
a2fbb9ea
ET
4305 REG_WR(bp, reg_offset, val);
4306
4307 /* USTORM */
4308 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4309 u_def_status_block);
34f80b04 4310 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4311
4312 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4313 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4314 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4315 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4316 U64_HI(section));
5c862848 4317 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4318 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4319
4320 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4321 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4322 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4323
4324 /* CSTORM */
4325 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4326 c_def_status_block);
34f80b04 4327 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4328
4329 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4330 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4331 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4332 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4333 U64_HI(section));
5c862848 4334 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4335 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4336
4337 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4338 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4339 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4340
4341 /* TSTORM */
4342 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4343 t_def_status_block);
34f80b04 4344 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4345
4346 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4347 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4348 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4349 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4350 U64_HI(section));
5c862848 4351 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4352 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4353
4354 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4355 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4356 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4357
4358 /* XSTORM */
4359 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4360 x_def_status_block);
34f80b04 4361 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4362
4363 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4364 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4365 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4366 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4367 U64_HI(section));
5c862848 4368 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4369 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4370
4371 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4372 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4373 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4374
bb2a0f7a 4375 bp->stats_pending = 0;
66e855f3 4376 bp->set_mac_pending = 0;
bb2a0f7a 4377
34f80b04 4378 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4379}
4380
4381static void bnx2x_update_coalesce(struct bnx2x *bp)
4382{
34f80b04 4383 int port = BP_PORT(bp);
a2fbb9ea
ET
4384 int i;
4385
4386 for_each_queue(bp, i) {
34f80b04 4387 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4388
4389 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4390 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4391 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4392 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4393 bp->rx_ticks/12);
a2fbb9ea 4394 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4395 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4396 U_SB_ETH_RX_CQ_INDEX),
4397 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4398
4399 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4400 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4401 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4402 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4403 bp->tx_ticks/12);
a2fbb9ea 4404 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4405 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4406 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4407 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4408 }
4409}
4410
7a9b2557
VZ
4411static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4412 struct bnx2x_fastpath *fp, int last)
4413{
4414 int i;
4415
4416 for (i = 0; i < last; i++) {
4417 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4418 struct sk_buff *skb = rx_buf->skb;
4419
4420 if (skb == NULL) {
4421 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4422 continue;
4423 }
4424
4425 if (fp->tpa_state[i] == BNX2X_TPA_START)
4426 pci_unmap_single(bp->pdev,
4427 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4428 bp->rx_buf_size,
7a9b2557
VZ
4429 PCI_DMA_FROMDEVICE);
4430
4431 dev_kfree_skb(skb);
4432 rx_buf->skb = NULL;
4433 }
4434}
4435
a2fbb9ea
ET
4436static void bnx2x_init_rx_rings(struct bnx2x *bp)
4437{
7a9b2557 4438 int func = BP_FUNC(bp);
32626230
EG
4439 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4440 ETH_MAX_AGGREGATION_QUEUES_E1H;
4441 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4442 int i, j;
a2fbb9ea 4443
87942b46 4444 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4445 DP(NETIF_MSG_IFUP,
4446 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4447
7a9b2557 4448 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4449
555f6c78 4450 for_each_rx_queue(bp, j) {
32626230 4451 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4452
32626230 4453 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4454 fp->tpa_pool[i].skb =
4455 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4456 if (!fp->tpa_pool[i].skb) {
4457 BNX2X_ERR("Failed to allocate TPA "
4458 "skb pool for queue[%d] - "
4459 "disabling TPA on this "
4460 "queue!\n", j);
4461 bnx2x_free_tpa_pool(bp, fp, i);
4462 fp->disable_tpa = 1;
4463 break;
4464 }
4465 pci_unmap_addr_set((struct sw_rx_bd *)
4466 &bp->fp->tpa_pool[i],
4467 mapping, 0);
4468 fp->tpa_state[i] = BNX2X_TPA_STOP;
4469 }
4470 }
4471 }
4472
555f6c78 4473 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4474 struct bnx2x_fastpath *fp = &bp->fp[j];
4475
4476 fp->rx_bd_cons = 0;
4477 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4478 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4479
4480 /* "next page" elements initialization */
4481 /* SGE ring */
4482 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4483 struct eth_rx_sge *sge;
4484
4485 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4486 sge->addr_hi =
4487 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4488 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4489 sge->addr_lo =
4490 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4491 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4492 }
4493
4494 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4495
7a9b2557 4496 /* RX BD ring */
a2fbb9ea
ET
4497 for (i = 1; i <= NUM_RX_RINGS; i++) {
4498 struct eth_rx_bd *rx_bd;
4499
4500 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4501 rx_bd->addr_hi =
4502 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4503 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4504 rx_bd->addr_lo =
4505 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4506 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4507 }
4508
34f80b04 4509 /* CQ ring */
a2fbb9ea
ET
4510 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4511 struct eth_rx_cqe_next_page *nextpg;
4512
4513 nextpg = (struct eth_rx_cqe_next_page *)
4514 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4515 nextpg->addr_hi =
4516 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4517 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4518 nextpg->addr_lo =
4519 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4520 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4521 }
4522
7a9b2557
VZ
4523 /* Allocate SGEs and initialize the ring elements */
4524 for (i = 0, ring_prod = 0;
4525 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4526
7a9b2557
VZ
4527 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4528 BNX2X_ERR("was only able to allocate "
4529 "%d rx sges\n", i);
4530 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4531 /* Cleanup already allocated elements */
4532 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4533 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4534 fp->disable_tpa = 1;
4535 ring_prod = 0;
4536 break;
4537 }
4538 ring_prod = NEXT_SGE_IDX(ring_prod);
4539 }
4540 fp->rx_sge_prod = ring_prod;
4541
4542 /* Allocate BDs and initialize BD ring */
66e855f3 4543 fp->rx_comp_cons = 0;
7a9b2557 4544 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4545 for (i = 0; i < bp->rx_ring_size; i++) {
4546 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4547 BNX2X_ERR("was only able to allocate "
de832a55
EG
4548 "%d rx skbs on queue[%d]\n", i, j);
4549 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4550 break;
4551 }
4552 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4553 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4554 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4555 }
4556
7a9b2557
VZ
4557 fp->rx_bd_prod = ring_prod;
4558 /* must not have more available CQEs than BDs */
4559 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4560 cqe_ring_prod);
a2fbb9ea
ET
4561 fp->rx_pkt = fp->rx_calls = 0;
4562
7a9b2557
VZ
4563 /* Warning!
4564 * this will generate an interrupt (to the TSTORM)
4565 * must only be done after chip is initialized
4566 */
4567 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4568 fp->rx_sge_prod);
a2fbb9ea
ET
4569 if (j != 0)
4570 continue;
4571
4572 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4573 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4574 U64_LO(fp->rx_comp_mapping));
4575 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4576 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4577 U64_HI(fp->rx_comp_mapping));
4578 }
4579}
4580
4581static void bnx2x_init_tx_ring(struct bnx2x *bp)
4582{
4583 int i, j;
4584
555f6c78 4585 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
4586 struct bnx2x_fastpath *fp = &bp->fp[j];
4587
4588 for (i = 1; i <= NUM_TX_RINGS; i++) {
4589 struct eth_tx_bd *tx_bd =
4590 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4591
4592 tx_bd->addr_hi =
4593 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4594 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4595 tx_bd->addr_lo =
4596 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4597 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4598 }
4599
4600 fp->tx_pkt_prod = 0;
4601 fp->tx_pkt_cons = 0;
4602 fp->tx_bd_prod = 0;
4603 fp->tx_bd_cons = 0;
4604 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4605 fp->tx_pkt = 0;
4606 }
4607}
4608
4609static void bnx2x_init_sp_ring(struct bnx2x *bp)
4610{
34f80b04 4611 int func = BP_FUNC(bp);
a2fbb9ea
ET
4612
4613 spin_lock_init(&bp->spq_lock);
4614
4615 bp->spq_left = MAX_SPQ_PENDING;
4616 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4617 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4618 bp->spq_prod_bd = bp->spq;
4619 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4620
34f80b04 4621 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4622 U64_LO(bp->spq_mapping));
34f80b04
EG
4623 REG_WR(bp,
4624 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4625 U64_HI(bp->spq_mapping));
4626
34f80b04 4627 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4628 bp->spq_prod_idx);
4629}
4630
4631static void bnx2x_init_context(struct bnx2x *bp)
4632{
4633 int i;
4634
4635 for_each_queue(bp, i) {
4636 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4637 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4638 u8 cl_id = fp->cl_id;
34f80b04 4639 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea 4640
34f80b04
EG
4641 context->ustorm_st_context.common.sb_index_numbers =
4642 BNX2X_RX_SB_INDEX_NUM;
4643 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4644 context->ustorm_st_context.common.status_block_id = sb_id;
4645 context->ustorm_st_context.common.flags =
de832a55
EG
4646 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4647 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4648 context->ustorm_st_context.common.statistics_counter_id =
4649 cl_id;
8d9c5f34 4650 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4651 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4652 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4653 bp->rx_buf_size;
34f80b04 4654 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4655 U64_HI(fp->rx_desc_mapping);
34f80b04 4656 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4657 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4658 if (!fp->disable_tpa) {
4659 context->ustorm_st_context.common.flags |=
4660 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4661 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4662 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4663 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4664 (u32)0xffff);
7a9b2557
VZ
4665 context->ustorm_st_context.common.sge_page_base_hi =
4666 U64_HI(fp->rx_sge_mapping);
4667 context->ustorm_st_context.common.sge_page_base_lo =
4668 U64_LO(fp->rx_sge_mapping);
4669 }
4670
8d9c5f34
EG
4671 context->ustorm_ag_context.cdu_usage =
4672 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4673 CDU_REGION_NUMBER_UCM_AG,
4674 ETH_CONNECTION_TYPE);
4675
4676 context->xstorm_st_context.tx_bd_page_base_hi =
4677 U64_HI(fp->tx_desc_mapping);
4678 context->xstorm_st_context.tx_bd_page_base_lo =
4679 U64_LO(fp->tx_desc_mapping);
4680 context->xstorm_st_context.db_data_addr_hi =
4681 U64_HI(fp->tx_prods_mapping);
4682 context->xstorm_st_context.db_data_addr_lo =
4683 U64_LO(fp->tx_prods_mapping);
4684 context->xstorm_st_context.statistics_data = (fp->cl_id |
4685 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4686 context->cstorm_st_context.sb_index_number =
5c862848 4687 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4688 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4689
4690 context->xstorm_ag_context.cdu_reserved =
4691 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4692 CDU_REGION_NUMBER_XCM_AG,
4693 ETH_CONNECTION_TYPE);
a2fbb9ea
ET
4694 }
4695}
4696
4697static void bnx2x_init_ind_table(struct bnx2x *bp)
4698{
26c8fa4d 4699 int func = BP_FUNC(bp);
a2fbb9ea
ET
4700 int i;
4701
555f6c78 4702 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4703 return;
4704
555f6c78
EG
4705 DP(NETIF_MSG_IFUP,
4706 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4707 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4708 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4709 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
555f6c78 4710 BP_CL_ID(bp) + (i % bp->num_rx_queues));
a2fbb9ea
ET
4711}
4712
49d66772
ET
4713static void bnx2x_set_client_config(struct bnx2x *bp)
4714{
49d66772 4715 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4716 int port = BP_PORT(bp);
4717 int i;
49d66772 4718
e7799c5f 4719 tstorm_client.mtu = bp->dev->mtu;
49d66772 4720 tstorm_client.config_flags =
de832a55
EG
4721 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4722 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4723#ifdef BCM_VLAN
0c6671b0 4724 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4725 tstorm_client.config_flags |=
8d9c5f34 4726 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
4727 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4728 }
4729#endif
49d66772 4730
7a9b2557
VZ
4731 if (bp->flags & TPA_ENABLE_FLAG) {
4732 tstorm_client.max_sges_for_packet =
4f40f2cb 4733 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4734 tstorm_client.max_sges_for_packet =
4735 ((tstorm_client.max_sges_for_packet +
4736 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4737 PAGES_PER_SGE_SHIFT;
4738
4739 tstorm_client.config_flags |=
4740 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4741 }
4742
49d66772 4743 for_each_queue(bp, i) {
de832a55
EG
4744 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4745
49d66772 4746 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4747 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4748 ((u32 *)&tstorm_client)[0]);
4749 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4750 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4751 ((u32 *)&tstorm_client)[1]);
4752 }
4753
34f80b04
EG
4754 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4755 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4756}
4757
a2fbb9ea
ET
4758static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4759{
a2fbb9ea 4760 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4761 int mode = bp->rx_mode;
4762 int mask = (1 << BP_L_ID(bp));
4763 int func = BP_FUNC(bp);
a2fbb9ea
ET
4764 int i;
4765
3196a88a 4766 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4767
4768 switch (mode) {
4769 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4770 tstorm_mac_filter.ucast_drop_all = mask;
4771 tstorm_mac_filter.mcast_drop_all = mask;
4772 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4773 break;
4774 case BNX2X_RX_MODE_NORMAL:
34f80b04 4775 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4776 break;
4777 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4778 tstorm_mac_filter.mcast_accept_all = mask;
4779 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4780 break;
4781 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4782 tstorm_mac_filter.ucast_accept_all = mask;
4783 tstorm_mac_filter.mcast_accept_all = mask;
4784 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4785 break;
4786 default:
34f80b04
EG
4787 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4788 break;
a2fbb9ea
ET
4789 }
4790
4791 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4792 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4793 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4794 ((u32 *)&tstorm_mac_filter)[i]);
4795
34f80b04 4796/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4797 ((u32 *)&tstorm_mac_filter)[i]); */
4798 }
a2fbb9ea 4799
49d66772
ET
4800 if (mode != BNX2X_RX_MODE_NONE)
4801 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4802}
4803
471de716
EG
4804static void bnx2x_init_internal_common(struct bnx2x *bp)
4805{
4806 int i;
4807
3cdf1db7
YG
4808 if (bp->flags & TPA_ENABLE_FLAG) {
4809 struct tstorm_eth_tpa_exist tpa = {0};
4810
4811 tpa.tpa_exist = 1;
4812
4813 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4814 ((u32 *)&tpa)[0]);
4815 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4816 ((u32 *)&tpa)[1]);
4817 }
4818
471de716
EG
4819 /* Zero this manually as its initialization is
4820 currently missing in the initTool */
4821 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4822 REG_WR(bp, BAR_USTRORM_INTMEM +
4823 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4824}
4825
4826static void bnx2x_init_internal_port(struct bnx2x *bp)
4827{
4828 int port = BP_PORT(bp);
4829
4830 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4831 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4832 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4833 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4834}
4835
8a1c38d1
EG
4836/* Calculates the sum of vn_min_rates.
4837 It's needed for further normalizing of the min_rates.
4838 Returns:
4839 sum of vn_min_rates.
4840 or
4841 0 - if all the min_rates are 0.
4842 In the later case fainess algorithm should be deactivated.
4843 If not all min_rates are zero then those that are zeroes will be set to 1.
4844 */
4845static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4846{
4847 int all_zero = 1;
4848 int port = BP_PORT(bp);
4849 int vn;
4850
4851 bp->vn_weight_sum = 0;
4852 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4853 int func = 2*vn + port;
4854 u32 vn_cfg =
4855 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4856 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4857 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4858
4859 /* Skip hidden vns */
4860 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4861 continue;
4862
4863 /* If min rate is zero - set it to 1 */
4864 if (!vn_min_rate)
4865 vn_min_rate = DEF_MIN_RATE;
4866 else
4867 all_zero = 0;
4868
4869 bp->vn_weight_sum += vn_min_rate;
4870 }
4871
4872 /* ... only if all min rates are zeros - disable fairness */
4873 if (all_zero)
4874 bp->vn_weight_sum = 0;
4875}
4876
471de716 4877static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4878{
a2fbb9ea
ET
4879 struct tstorm_eth_function_common_config tstorm_config = {0};
4880 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4881 int port = BP_PORT(bp);
4882 int func = BP_FUNC(bp);
de832a55
EG
4883 int i, j;
4884 u32 offset;
471de716 4885 u16 max_agg_size;
a2fbb9ea
ET
4886
4887 if (is_multi(bp)) {
555f6c78 4888 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4889 tstorm_config.rss_result_mask = MULTI_MASK;
4890 }
8d9c5f34
EG
4891 if (IS_E1HMF(bp))
4892 tstorm_config.config_flags |=
4893 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4894
34f80b04
EG
4895 tstorm_config.leading_client_id = BP_L_ID(bp);
4896
a2fbb9ea 4897 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4898 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4899 (*(u32 *)&tstorm_config));
4900
c14423fe 4901 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4902 bnx2x_set_storm_rx_mode(bp);
4903
de832a55
EG
4904 for_each_queue(bp, i) {
4905 u8 cl_id = bp->fp[i].cl_id;
4906
4907 /* reset xstorm per client statistics */
4908 offset = BAR_XSTRORM_INTMEM +
4909 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4910 for (j = 0;
4911 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4912 REG_WR(bp, offset + j*4, 0);
4913
4914 /* reset tstorm per client statistics */
4915 offset = BAR_TSTRORM_INTMEM +
4916 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4917 for (j = 0;
4918 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4919 REG_WR(bp, offset + j*4, 0);
4920
4921 /* reset ustorm per client statistics */
4922 offset = BAR_USTRORM_INTMEM +
4923 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4924 for (j = 0;
4925 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4926 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4927 }
4928
4929 /* Init statistics related context */
34f80b04 4930 stats_flags.collect_eth = 1;
a2fbb9ea 4931
66e855f3 4932 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4933 ((u32 *)&stats_flags)[0]);
66e855f3 4934 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4935 ((u32 *)&stats_flags)[1]);
4936
66e855f3 4937 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4938 ((u32 *)&stats_flags)[0]);
66e855f3 4939 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4940 ((u32 *)&stats_flags)[1]);
4941
de832a55
EG
4942 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4943 ((u32 *)&stats_flags)[0]);
4944 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4945 ((u32 *)&stats_flags)[1]);
4946
66e855f3 4947 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4948 ((u32 *)&stats_flags)[0]);
66e855f3 4949 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4950 ((u32 *)&stats_flags)[1]);
4951
66e855f3
YG
4952 REG_WR(bp, BAR_XSTRORM_INTMEM +
4953 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4954 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4955 REG_WR(bp, BAR_XSTRORM_INTMEM +
4956 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4957 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4958
4959 REG_WR(bp, BAR_TSTRORM_INTMEM +
4960 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4961 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4962 REG_WR(bp, BAR_TSTRORM_INTMEM +
4963 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4964 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4965
de832a55
EG
4966 REG_WR(bp, BAR_USTRORM_INTMEM +
4967 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4968 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4969 REG_WR(bp, BAR_USTRORM_INTMEM +
4970 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4971 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4972
34f80b04
EG
4973 if (CHIP_IS_E1H(bp)) {
4974 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4975 IS_E1HMF(bp));
4976 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4977 IS_E1HMF(bp));
4978 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4979 IS_E1HMF(bp));
4980 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4981 IS_E1HMF(bp));
4982
7a9b2557
VZ
4983 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4984 bp->e1hov);
34f80b04
EG
4985 }
4986
4f40f2cb
EG
4987 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4988 max_agg_size =
4989 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4990 SGE_PAGE_SIZE * PAGES_PER_SGE),
4991 (u32)0xffff);
555f6c78 4992 for_each_rx_queue(bp, i) {
7a9b2557 4993 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4994
4995 REG_WR(bp, BAR_USTRORM_INTMEM +
4996 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4997 U64_LO(fp->rx_comp_mapping));
4998 REG_WR(bp, BAR_USTRORM_INTMEM +
4999 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
5000 U64_HI(fp->rx_comp_mapping));
5001
7a9b2557
VZ
5002 REG_WR16(bp, BAR_USTRORM_INTMEM +
5003 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
5004 max_agg_size);
5005 }
8a1c38d1 5006
1c06328c
EG
5007 /* dropless flow control */
5008 if (CHIP_IS_E1H(bp)) {
5009 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5010
5011 rx_pause.bd_thr_low = 250;
5012 rx_pause.cqe_thr_low = 250;
5013 rx_pause.cos = 1;
5014 rx_pause.sge_thr_low = 0;
5015 rx_pause.bd_thr_high = 350;
5016 rx_pause.cqe_thr_high = 350;
5017 rx_pause.sge_thr_high = 0;
5018
5019 for_each_rx_queue(bp, i) {
5020 struct bnx2x_fastpath *fp = &bp->fp[i];
5021
5022 if (!fp->disable_tpa) {
5023 rx_pause.sge_thr_low = 150;
5024 rx_pause.sge_thr_high = 250;
5025 }
5026
5027
5028 offset = BAR_USTRORM_INTMEM +
5029 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5030 fp->cl_id);
5031 for (j = 0;
5032 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5033 j++)
5034 REG_WR(bp, offset + j*4,
5035 ((u32 *)&rx_pause)[j]);
5036 }
5037 }
5038
8a1c38d1
EG
5039 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5040
5041 /* Init rate shaping and fairness contexts */
5042 if (IS_E1HMF(bp)) {
5043 int vn;
5044
5045 /* During init there is no active link
5046 Until link is up, set link rate to 10Gbps */
5047 bp->link_vars.line_speed = SPEED_10000;
5048 bnx2x_init_port_minmax(bp);
5049
5050 bnx2x_calc_vn_weight_sum(bp);
5051
5052 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5053 bnx2x_init_vn_minmax(bp, 2*vn + port);
5054
5055 /* Enable rate shaping and fairness */
5056 bp->cmng.flags.cmng_enables =
5057 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5058 if (bp->vn_weight_sum)
5059 bp->cmng.flags.cmng_enables |=
5060 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5061 else
5062 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5063 " fairness will be disabled\n");
5064 } else {
5065 /* rate shaping and fairness are disabled */
5066 DP(NETIF_MSG_IFUP,
5067 "single function mode minmax will be disabled\n");
5068 }
5069
5070
5071 /* Store it to internal memory */
5072 if (bp->port.pmf)
5073 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5074 REG_WR(bp, BAR_XSTRORM_INTMEM +
5075 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5076 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5077}
5078
471de716
EG
5079static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5080{
5081 switch (load_code) {
5082 case FW_MSG_CODE_DRV_LOAD_COMMON:
5083 bnx2x_init_internal_common(bp);
5084 /* no break */
5085
5086 case FW_MSG_CODE_DRV_LOAD_PORT:
5087 bnx2x_init_internal_port(bp);
5088 /* no break */
5089
5090 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5091 bnx2x_init_internal_func(bp);
5092 break;
5093
5094 default:
5095 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5096 break;
5097 }
5098}
5099
5100static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5101{
5102 int i;
5103
5104 for_each_queue(bp, i) {
5105 struct bnx2x_fastpath *fp = &bp->fp[i];
5106
34f80b04 5107 fp->bp = bp;
a2fbb9ea 5108 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5109 fp->index = i;
34f80b04
EG
5110 fp->cl_id = BP_L_ID(bp) + i;
5111 fp->sb_id = fp->cl_id;
5112 DP(NETIF_MSG_IFUP,
5113 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5114 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
5115 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5116 FP_SB_ID(fp));
5117 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5118 }
5119
5c862848
EG
5120 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5121 DEF_SB_ID);
5122 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5123 bnx2x_update_coalesce(bp);
5124 bnx2x_init_rx_rings(bp);
5125 bnx2x_init_tx_ring(bp);
5126 bnx2x_init_sp_ring(bp);
5127 bnx2x_init_context(bp);
471de716 5128 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5129 bnx2x_init_ind_table(bp);
0ef00459
EG
5130 bnx2x_stats_init(bp);
5131
5132 /* At this point, we are ready for interrupts */
5133 atomic_set(&bp->intr_sem, 0);
5134
5135 /* flush all before enabling interrupts */
5136 mb();
5137 mmiowb();
5138
615f8fd9 5139 bnx2x_int_enable(bp);
a2fbb9ea
ET
5140}
5141
5142/* end of nic init */
5143
5144/*
5145 * gzip service functions
5146 */
5147
5148static int bnx2x_gunzip_init(struct bnx2x *bp)
5149{
5150 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5151 &bp->gunzip_mapping);
5152 if (bp->gunzip_buf == NULL)
5153 goto gunzip_nomem1;
5154
5155 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5156 if (bp->strm == NULL)
5157 goto gunzip_nomem2;
5158
5159 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5160 GFP_KERNEL);
5161 if (bp->strm->workspace == NULL)
5162 goto gunzip_nomem3;
5163
5164 return 0;
5165
5166gunzip_nomem3:
5167 kfree(bp->strm);
5168 bp->strm = NULL;
5169
5170gunzip_nomem2:
5171 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5172 bp->gunzip_mapping);
5173 bp->gunzip_buf = NULL;
5174
5175gunzip_nomem1:
5176 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5177 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5178 return -ENOMEM;
5179}
5180
5181static void bnx2x_gunzip_end(struct bnx2x *bp)
5182{
5183 kfree(bp->strm->workspace);
5184
5185 kfree(bp->strm);
5186 bp->strm = NULL;
5187
5188 if (bp->gunzip_buf) {
5189 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5190 bp->gunzip_mapping);
5191 bp->gunzip_buf = NULL;
5192 }
5193}
5194
5195static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5196{
5197 int n, rc;
5198
5199 /* check gzip header */
5200 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5201 return -EINVAL;
5202
5203 n = 10;
5204
34f80b04 5205#define FNAME 0x8
a2fbb9ea
ET
5206
5207 if (zbuf[3] & FNAME)
5208 while ((zbuf[n++] != 0) && (n < len));
5209
5210 bp->strm->next_in = zbuf + n;
5211 bp->strm->avail_in = len - n;
5212 bp->strm->next_out = bp->gunzip_buf;
5213 bp->strm->avail_out = FW_BUF_SIZE;
5214
5215 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5216 if (rc != Z_OK)
5217 return rc;
5218
5219 rc = zlib_inflate(bp->strm, Z_FINISH);
5220 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5221 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5222 bp->dev->name, bp->strm->msg);
5223
5224 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5225 if (bp->gunzip_outlen & 0x3)
5226 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5227 " gunzip_outlen (%d) not aligned\n",
5228 bp->dev->name, bp->gunzip_outlen);
5229 bp->gunzip_outlen >>= 2;
5230
5231 zlib_inflateEnd(bp->strm);
5232
5233 if (rc == Z_STREAM_END)
5234 return 0;
5235
5236 return rc;
5237}
5238
5239/* nic load/unload */
5240
5241/*
34f80b04 5242 * General service functions
a2fbb9ea
ET
5243 */
5244
5245/* send a NIG loopback debug packet */
5246static void bnx2x_lb_pckt(struct bnx2x *bp)
5247{
a2fbb9ea 5248 u32 wb_write[3];
a2fbb9ea
ET
5249
5250 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5251 wb_write[0] = 0x55555555;
5252 wb_write[1] = 0x55555555;
34f80b04 5253 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5254 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5255
5256 /* NON-IP protocol */
a2fbb9ea
ET
5257 wb_write[0] = 0x09000000;
5258 wb_write[1] = 0x55555555;
34f80b04 5259 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5260 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5261}
5262
5263/* some of the internal memories
5264 * are not directly readable from the driver
5265 * to test them we send debug packets
5266 */
5267static int bnx2x_int_mem_test(struct bnx2x *bp)
5268{
5269 int factor;
5270 int count, i;
5271 u32 val = 0;
5272
ad8d3948 5273 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5274 factor = 120;
ad8d3948
EG
5275 else if (CHIP_REV_IS_EMUL(bp))
5276 factor = 200;
5277 else
a2fbb9ea 5278 factor = 1;
a2fbb9ea
ET
5279
5280 DP(NETIF_MSG_HW, "start part1\n");
5281
5282 /* Disable inputs of parser neighbor blocks */
5283 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5284 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5285 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5286 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5287
5288 /* Write 0 to parser credits for CFC search request */
5289 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5290
5291 /* send Ethernet packet */
5292 bnx2x_lb_pckt(bp);
5293
5294 /* TODO do i reset NIG statistic? */
5295 /* Wait until NIG register shows 1 packet of size 0x10 */
5296 count = 1000 * factor;
5297 while (count) {
34f80b04 5298
a2fbb9ea
ET
5299 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5300 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5301 if (val == 0x10)
5302 break;
5303
5304 msleep(10);
5305 count--;
5306 }
5307 if (val != 0x10) {
5308 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5309 return -1;
5310 }
5311
5312 /* Wait until PRS register shows 1 packet */
5313 count = 1000 * factor;
5314 while (count) {
5315 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5316 if (val == 1)
5317 break;
5318
5319 msleep(10);
5320 count--;
5321 }
5322 if (val != 0x1) {
5323 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5324 return -2;
5325 }
5326
5327 /* Reset and init BRB, PRS */
34f80b04 5328 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5329 msleep(50);
34f80b04 5330 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5331 msleep(50);
5332 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5333 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5334
5335 DP(NETIF_MSG_HW, "part2\n");
5336
5337 /* Disable inputs of parser neighbor blocks */
5338 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5339 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5340 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5341 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5342
5343 /* Write 0 to parser credits for CFC search request */
5344 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5345
5346 /* send 10 Ethernet packets */
5347 for (i = 0; i < 10; i++)
5348 bnx2x_lb_pckt(bp);
5349
5350 /* Wait until NIG register shows 10 + 1
5351 packets of size 11*0x10 = 0xb0 */
5352 count = 1000 * factor;
5353 while (count) {
34f80b04 5354
a2fbb9ea
ET
5355 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5356 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5357 if (val == 0xb0)
5358 break;
5359
5360 msleep(10);
5361 count--;
5362 }
5363 if (val != 0xb0) {
5364 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5365 return -3;
5366 }
5367
5368 /* Wait until PRS register shows 2 packets */
5369 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5370 if (val != 2)
5371 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5372
5373 /* Write 1 to parser credits for CFC search request */
5374 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5375
5376 /* Wait until PRS register shows 3 packets */
5377 msleep(10 * factor);
5378 /* Wait until NIG register shows 1 packet of size 0x10 */
5379 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5380 if (val != 3)
5381 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5382
5383 /* clear NIG EOP FIFO */
5384 for (i = 0; i < 11; i++)
5385 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5386 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5387 if (val != 1) {
5388 BNX2X_ERR("clear of NIG failed\n");
5389 return -4;
5390 }
5391
5392 /* Reset and init BRB, PRS, NIG */
5393 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5394 msleep(50);
5395 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5396 msleep(50);
5397 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5398 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5399#ifndef BCM_ISCSI
5400 /* set NIC mode */
5401 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5402#endif
5403
5404 /* Enable inputs of parser neighbor blocks */
5405 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5406 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5407 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5408 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5409
5410 DP(NETIF_MSG_HW, "done\n");
5411
5412 return 0; /* OK */
5413}
5414
5415static void enable_blocks_attention(struct bnx2x *bp)
5416{
5417 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5418 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5419 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5420 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5421 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5422 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5423 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5424 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5425 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5426/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5427/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5428 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5429 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5430 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5431/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5432/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5433 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5434 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5435 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5436 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5437/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5438/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5439 if (CHIP_REV_IS_FPGA(bp))
5440 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5441 else
5442 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5443 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5444 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5445 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5446/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5447/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5448 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5449 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5450/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5451 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5452}
5453
34f80b04 5454
81f75bbf
EG
5455static void bnx2x_reset_common(struct bnx2x *bp)
5456{
5457 /* reset_common */
5458 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5459 0xd3ffff7f);
5460 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5461}
5462
34f80b04 5463static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5464{
a2fbb9ea 5465 u32 val, i;
a2fbb9ea 5466
34f80b04 5467 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5468
81f75bbf 5469 bnx2x_reset_common(bp);
34f80b04
EG
5470 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5471 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5472
34f80b04
EG
5473 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5474 if (CHIP_IS_E1H(bp))
5475 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5476
34f80b04
EG
5477 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5478 msleep(30);
5479 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5480
34f80b04
EG
5481 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5482 if (CHIP_IS_E1(bp)) {
5483 /* enable HW interrupt from PXP on USDM overflow
5484 bit 16 on INT_MASK_0 */
5485 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5486 }
a2fbb9ea 5487
34f80b04
EG
5488 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5489 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5490
5491#ifdef __BIG_ENDIAN
34f80b04
EG
5492 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5493 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5494 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5495 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5496 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5497 /* make sure this value is 0 */
5498 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5499
5500/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5501 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5502 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5503 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5504 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5505#endif
5506
34f80b04 5507 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5508#ifdef BCM_ISCSI
34f80b04
EG
5509 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5510 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5511 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5512#endif
5513
34f80b04
EG
5514 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5515 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5516
34f80b04
EG
5517 /* let the HW do it's magic ... */
5518 msleep(100);
5519 /* finish PXP init */
5520 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5521 if (val != 1) {
5522 BNX2X_ERR("PXP2 CFG failed\n");
5523 return -EBUSY;
5524 }
5525 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5526 if (val != 1) {
5527 BNX2X_ERR("PXP2 RD_INIT failed\n");
5528 return -EBUSY;
5529 }
a2fbb9ea 5530
34f80b04
EG
5531 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5532 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5533
34f80b04 5534 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5535
34f80b04
EG
5536 /* clean the DMAE memory */
5537 bp->dmae_ready = 1;
5538 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5539
34f80b04
EG
5540 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5541 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5542 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5543 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5544
34f80b04
EG
5545 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5546 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5547 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5548 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5549
5550 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5551 /* soft reset pulse */
5552 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5553 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5554
5555#ifdef BCM_ISCSI
34f80b04 5556 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5557#endif
a2fbb9ea 5558
34f80b04
EG
5559 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5560 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5561 if (!CHIP_REV_IS_SLOW(bp)) {
5562 /* enable hw interrupt from doorbell Q */
5563 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5564 }
a2fbb9ea 5565
34f80b04 5566 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5567 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5568 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5569 /* set NIC mode */
5570 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5571 if (CHIP_IS_E1H(bp))
5572 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5573
34f80b04
EG
5574 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5575 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5576 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5577 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5578
34f80b04
EG
5579 if (CHIP_IS_E1H(bp)) {
5580 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5581 STORM_INTMEM_SIZE_E1H/2);
5582 bnx2x_init_fill(bp,
5583 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5584 0, STORM_INTMEM_SIZE_E1H/2);
5585 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5586 STORM_INTMEM_SIZE_E1H/2);
5587 bnx2x_init_fill(bp,
5588 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5589 0, STORM_INTMEM_SIZE_E1H/2);
5590 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5591 STORM_INTMEM_SIZE_E1H/2);
5592 bnx2x_init_fill(bp,
5593 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5594 0, STORM_INTMEM_SIZE_E1H/2);
5595 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5596 STORM_INTMEM_SIZE_E1H/2);
5597 bnx2x_init_fill(bp,
5598 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5599 0, STORM_INTMEM_SIZE_E1H/2);
5600 } else { /* E1 */
ad8d3948
EG
5601 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5602 STORM_INTMEM_SIZE_E1);
5603 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5604 STORM_INTMEM_SIZE_E1);
5605 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5606 STORM_INTMEM_SIZE_E1);
5607 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5608 STORM_INTMEM_SIZE_E1);
34f80b04 5609 }
a2fbb9ea 5610
34f80b04
EG
5611 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5612 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5613 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5614 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5615
34f80b04
EG
5616 /* sync semi rtc */
5617 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5618 0x80000000);
5619 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5620 0x80000000);
a2fbb9ea 5621
34f80b04
EG
5622 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5623 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5624 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5625
34f80b04
EG
5626 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5627 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5628 REG_WR(bp, i, 0xc0cac01a);
5629 /* TODO: replace with something meaningful */
5630 }
8d9c5f34 5631 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5632 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5633
34f80b04
EG
5634 if (sizeof(union cdu_context) != 1024)
5635 /* we currently assume that a context is 1024 bytes */
5636 printk(KERN_ALERT PFX "please adjust the size of"
5637 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5638
34f80b04
EG
5639 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5640 val = (4 << 24) + (0 << 12) + 1024;
5641 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5642 if (CHIP_IS_E1(bp)) {
5643 /* !!! fix pxp client crdit until excel update */
5644 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5645 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5646 }
a2fbb9ea 5647
34f80b04
EG
5648 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5649 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5650 /* enable context validation interrupt from CFC */
5651 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5652
5653 /* set the thresholds to prevent CFC/CDU race */
5654 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5655
34f80b04
EG
5656 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5657 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5658
34f80b04
EG
5659 /* PXPCS COMMON comes here */
5660 /* Reset PCIE errors for debug */
5661 REG_WR(bp, 0x2814, 0xffffffff);
5662 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5663
34f80b04
EG
5664 /* EMAC0 COMMON comes here */
5665 /* EMAC1 COMMON comes here */
5666 /* DBU COMMON comes here */
5667 /* DBG COMMON comes here */
5668
5669 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5670 if (CHIP_IS_E1H(bp)) {
5671 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5672 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5673 }
5674
5675 if (CHIP_REV_IS_SLOW(bp))
5676 msleep(200);
5677
5678 /* finish CFC init */
5679 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5680 if (val != 1) {
5681 BNX2X_ERR("CFC LL_INIT failed\n");
5682 return -EBUSY;
5683 }
5684 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5685 if (val != 1) {
5686 BNX2X_ERR("CFC AC_INIT failed\n");
5687 return -EBUSY;
5688 }
5689 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5690 if (val != 1) {
5691 BNX2X_ERR("CFC CAM_INIT failed\n");
5692 return -EBUSY;
5693 }
5694 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5695
34f80b04
EG
5696 /* read NIG statistic
5697 to see if this is our first up since powerup */
5698 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5699 val = *bnx2x_sp(bp, wb_data[0]);
5700
5701 /* do internal memory self test */
5702 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5703 BNX2X_ERR("internal mem self test failed\n");
5704 return -EBUSY;
5705 }
5706
35b19ba5
EG
5707 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5708 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
34f80b04
EG
5709 /* Fan failure is indicated by SPIO 5 */
5710 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5711 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5712
5713 /* set to active low mode */
5714 val = REG_RD(bp, MISC_REG_SPIO_INT);
5715 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5716 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5717 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5718
34f80b04
EG
5719 /* enable interrupt to signal the IGU */
5720 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5721 val |= (1 << MISC_REGISTERS_SPIO_5);
5722 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5723 break;
f1410647 5724
34f80b04
EG
5725 default:
5726 break;
5727 }
f1410647 5728
34f80b04
EG
5729 /* clear PXP2 attentions */
5730 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5731
34f80b04 5732 enable_blocks_attention(bp);
a2fbb9ea 5733
6bbca910
YR
5734 if (!BP_NOMCP(bp)) {
5735 bnx2x_acquire_phy_lock(bp);
5736 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5737 bnx2x_release_phy_lock(bp);
5738 } else
5739 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5740
34f80b04
EG
5741 return 0;
5742}
a2fbb9ea 5743
34f80b04
EG
5744static int bnx2x_init_port(struct bnx2x *bp)
5745{
5746 int port = BP_PORT(bp);
1c06328c 5747 u32 low, high;
34f80b04 5748 u32 val;
a2fbb9ea 5749
34f80b04
EG
5750 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5751
5752 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5753
5754 /* Port PXP comes here */
5755 /* Port PXP2 comes here */
a2fbb9ea
ET
5756#ifdef BCM_ISCSI
5757 /* Port0 1
5758 * Port1 385 */
5759 i++;
5760 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5761 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5762 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5763 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5764
5765 /* Port0 2
5766 * Port1 386 */
5767 i++;
5768 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5769 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5770 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5771 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5772
5773 /* Port0 3
5774 * Port1 387 */
5775 i++;
5776 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5777 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5778 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5779 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5780#endif
34f80b04 5781 /* Port CMs come here */
8d9c5f34
EG
5782 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5783 (port ? XCM_PORT1_END : XCM_PORT0_END));
a2fbb9ea
ET
5784
5785 /* Port QM comes here */
a2fbb9ea
ET
5786#ifdef BCM_ISCSI
5787 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5788 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5789
5790 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5791 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5792#endif
5793 /* Port DQ comes here */
1c06328c
EG
5794
5795 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5796 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5797 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5798 /* no pause for emulation and FPGA */
5799 low = 0;
5800 high = 513;
5801 } else {
5802 if (IS_E1HMF(bp))
5803 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5804 else if (bp->dev->mtu > 4096) {
5805 if (bp->flags & ONE_PORT_FLAG)
5806 low = 160;
5807 else {
5808 val = bp->dev->mtu;
5809 /* (24*1024 + val*4)/256 */
5810 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5811 }
5812 } else
5813 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5814 high = low + 56; /* 14*1024/256 */
5815 }
5816 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5817 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5818
5819
ad8d3948 5820 /* Port PRS comes here */
a2fbb9ea
ET
5821 /* Port TSDM comes here */
5822 /* Port CSDM comes here */
5823 /* Port USDM comes here */
5824 /* Port XSDM comes here */
34f80b04
EG
5825 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5826 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5827 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5828 port ? USEM_PORT1_END : USEM_PORT0_END);
5829 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5830 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5831 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5832 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5833 /* Port UPB comes here */
34f80b04
EG
5834 /* Port XPB comes here */
5835
5836 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5837 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5838
5839 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5840 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5841
5842 /* update threshold */
34f80b04 5843 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5844 /* update init credit */
34f80b04 5845 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5846
5847 /* probe changes */
34f80b04 5848 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5849 msleep(5);
34f80b04 5850 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5851
5852#ifdef BCM_ISCSI
5853 /* tell the searcher where the T2 table is */
5854 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5855
5856 wb_write[0] = U64_LO(bp->t2_mapping);
5857 wb_write[1] = U64_HI(bp->t2_mapping);
5858 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5859 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5860 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5861 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5862
5863 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5864 /* Port SRCH comes here */
5865#endif
5866 /* Port CDU comes here */
5867 /* Port CFC comes here */
34f80b04
EG
5868
5869 if (CHIP_IS_E1(bp)) {
5870 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5871 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5872 }
5873 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5874 port ? HC_PORT1_END : HC_PORT0_END);
5875
5876 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5877 MISC_AEU_PORT0_START,
34f80b04
EG
5878 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5879 /* init aeu_mask_attn_func_0/1:
5880 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5881 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5882 * bits 4-7 are used for "per vn group attention" */
5883 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5884 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5885
a2fbb9ea
ET
5886 /* Port PXPCS comes here */
5887 /* Port EMAC0 comes here */
5888 /* Port EMAC1 comes here */
5889 /* Port DBU comes here */
5890 /* Port DBG comes here */
34f80b04
EG
5891 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5892 port ? NIG_PORT1_END : NIG_PORT0_END);
5893
5894 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5895
5896 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
5897 /* 0x2 disable e1hov, 0x1 enable */
5898 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5899 (IS_E1HMF(bp) ? 0x1 : 0x2));
5900
1c06328c
EG
5901 /* support pause requests from USDM, TSDM and BRB */
5902 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5903
5904 {
5905 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5906 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5907 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5908 }
34f80b04
EG
5909 }
5910
a2fbb9ea
ET
5911 /* Port MCP comes here */
5912 /* Port DMAE comes here */
5913
35b19ba5 5914 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
5915 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5916 {
5917 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5918
5919 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5920 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5921
5922 /* The GPIO should be swapped if the swap register is
5923 set and active */
5924 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5925 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5926
5927 /* Select function upon port-swap configuration */
5928 if (port == 0) {
5929 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5930 aeu_gpio_mask = (swap_val && swap_override) ?
5931 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5932 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5933 } else {
5934 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5935 aeu_gpio_mask = (swap_val && swap_override) ?
5936 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5937 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5938 }
5939 val = REG_RD(bp, offset);
5940 /* add GPIO3 to group */
5941 val |= aeu_gpio_mask;
5942 REG_WR(bp, offset, val);
5943 }
5944 break;
5945
35b19ba5 5946 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647
ET
5947 /* add SPIO 5 to group 0 */
5948 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5949 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5950 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5951 break;
5952
5953 default:
5954 break;
5955 }
5956
c18487ee 5957 bnx2x__link_reset(bp);
a2fbb9ea 5958
34f80b04
EG
5959 return 0;
5960}
5961
5962#define ILT_PER_FUNC (768/2)
5963#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5964/* the phys address is shifted right 12 bits and has an added
5965 1=valid bit added to the 53rd bit
5966 then since this is a wide register(TM)
5967 we split it into two 32 bit writes
5968 */
5969#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5970#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5971#define PXP_ONE_ILT(x) (((x) << 10) | x)
5972#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5973
5974#define CNIC_ILT_LINES 0
5975
5976static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5977{
5978 int reg;
5979
5980 if (CHIP_IS_E1H(bp))
5981 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5982 else /* E1 */
5983 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5984
5985 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5986}
5987
5988static int bnx2x_init_func(struct bnx2x *bp)
5989{
5990 int port = BP_PORT(bp);
5991 int func = BP_FUNC(bp);
8badd27a 5992 u32 addr, val;
34f80b04
EG
5993 int i;
5994
5995 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5996
8badd27a
EG
5997 /* set MSI reconfigure capability */
5998 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5999 val = REG_RD(bp, addr);
6000 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6001 REG_WR(bp, addr, val);
6002
34f80b04
EG
6003 i = FUNC_ILT_BASE(func);
6004
6005 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6006 if (CHIP_IS_E1H(bp)) {
6007 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6008 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6009 } else /* E1 */
6010 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6011 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6012
6013
6014 if (CHIP_IS_E1H(bp)) {
6015 for (i = 0; i < 9; i++)
6016 bnx2x_init_block(bp,
6017 cm_start[func][i], cm_end[func][i]);
6018
6019 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6020 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6021 }
6022
6023 /* HC init per function */
6024 if (CHIP_IS_E1H(bp)) {
6025 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6026
6027 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6028 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6029 }
6030 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6031
c14423fe 6032 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6033 REG_WR(bp, 0x2114, 0xffffffff);
6034 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6035
34f80b04
EG
6036 return 0;
6037}
6038
6039static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6040{
6041 int i, rc = 0;
a2fbb9ea 6042
34f80b04
EG
6043 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6044 BP_FUNC(bp), load_code);
a2fbb9ea 6045
34f80b04
EG
6046 bp->dmae_ready = 0;
6047 mutex_init(&bp->dmae_mutex);
6048 bnx2x_gunzip_init(bp);
a2fbb9ea 6049
34f80b04
EG
6050 switch (load_code) {
6051 case FW_MSG_CODE_DRV_LOAD_COMMON:
6052 rc = bnx2x_init_common(bp);
6053 if (rc)
6054 goto init_hw_err;
6055 /* no break */
6056
6057 case FW_MSG_CODE_DRV_LOAD_PORT:
6058 bp->dmae_ready = 1;
6059 rc = bnx2x_init_port(bp);
6060 if (rc)
6061 goto init_hw_err;
6062 /* no break */
6063
6064 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6065 bp->dmae_ready = 1;
6066 rc = bnx2x_init_func(bp);
6067 if (rc)
6068 goto init_hw_err;
6069 break;
6070
6071 default:
6072 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6073 break;
6074 }
6075
6076 if (!BP_NOMCP(bp)) {
6077 int func = BP_FUNC(bp);
a2fbb9ea
ET
6078
6079 bp->fw_drv_pulse_wr_seq =
34f80b04 6080 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6081 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6082 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6083 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6084 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6085 } else
6086 bp->func_stx = 0;
a2fbb9ea 6087
34f80b04
EG
6088 /* this needs to be done before gunzip end */
6089 bnx2x_zero_def_sb(bp);
6090 for_each_queue(bp, i)
6091 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6092
6093init_hw_err:
6094 bnx2x_gunzip_end(bp);
6095
6096 return rc;
a2fbb9ea
ET
6097}
6098
c14423fe 6099/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6100static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6101{
34f80b04 6102 int func = BP_FUNC(bp);
f1410647
ET
6103 u32 seq = ++bp->fw_seq;
6104 u32 rc = 0;
19680c48
EG
6105 u32 cnt = 1;
6106 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6107
34f80b04 6108 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6109 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6110
19680c48
EG
6111 do {
6112 /* let the FW do it's magic ... */
6113 msleep(delay);
a2fbb9ea 6114
19680c48 6115 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6116
19680c48
EG
6117 /* Give the FW up to 2 second (200*10ms) */
6118 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6119
6120 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6121 cnt*delay, rc, seq);
a2fbb9ea
ET
6122
6123 /* is this a reply to our command? */
6124 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6125 rc &= FW_MSG_CODE_MASK;
f1410647 6126
a2fbb9ea
ET
6127 } else {
6128 /* FW BUG! */
6129 BNX2X_ERR("FW failed to respond!\n");
6130 bnx2x_fw_dump(bp);
6131 rc = 0;
6132 }
f1410647 6133
a2fbb9ea
ET
6134 return rc;
6135}
6136
6137static void bnx2x_free_mem(struct bnx2x *bp)
6138{
6139
6140#define BNX2X_PCI_FREE(x, y, size) \
6141 do { \
6142 if (x) { \
6143 pci_free_consistent(bp->pdev, size, x, y); \
6144 x = NULL; \
6145 y = 0; \
6146 } \
6147 } while (0)
6148
6149#define BNX2X_FREE(x) \
6150 do { \
6151 if (x) { \
6152 vfree(x); \
6153 x = NULL; \
6154 } \
6155 } while (0)
6156
6157 int i;
6158
6159 /* fastpath */
555f6c78 6160 /* Common */
a2fbb9ea
ET
6161 for_each_queue(bp, i) {
6162
555f6c78 6163 /* status blocks */
a2fbb9ea
ET
6164 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6165 bnx2x_fp(bp, i, status_blk_mapping),
6166 sizeof(struct host_status_block) +
6167 sizeof(struct eth_tx_db_data));
555f6c78
EG
6168 }
6169 /* Rx */
6170 for_each_rx_queue(bp, i) {
a2fbb9ea 6171
555f6c78 6172 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6173 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6174 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6175 bnx2x_fp(bp, i, rx_desc_mapping),
6176 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6177
6178 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6179 bnx2x_fp(bp, i, rx_comp_mapping),
6180 sizeof(struct eth_fast_path_rx_cqe) *
6181 NUM_RCQ_BD);
a2fbb9ea 6182
7a9b2557 6183 /* SGE ring */
32626230 6184 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6185 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6186 bnx2x_fp(bp, i, rx_sge_mapping),
6187 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6188 }
555f6c78
EG
6189 /* Tx */
6190 for_each_tx_queue(bp, i) {
6191
6192 /* fastpath tx rings: tx_buf tx_desc */
6193 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6194 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6195 bnx2x_fp(bp, i, tx_desc_mapping),
6196 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6197 }
a2fbb9ea
ET
6198 /* end of fastpath */
6199
6200 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6201 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6202
6203 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6204 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6205
6206#ifdef BCM_ISCSI
6207 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6208 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6209 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6210 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6211#endif
7a9b2557 6212 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6213
6214#undef BNX2X_PCI_FREE
6215#undef BNX2X_KFREE
6216}
6217
6218static int bnx2x_alloc_mem(struct bnx2x *bp)
6219{
6220
6221#define BNX2X_PCI_ALLOC(x, y, size) \
6222 do { \
6223 x = pci_alloc_consistent(bp->pdev, size, y); \
6224 if (x == NULL) \
6225 goto alloc_mem_err; \
6226 memset(x, 0, size); \
6227 } while (0)
6228
6229#define BNX2X_ALLOC(x, size) \
6230 do { \
6231 x = vmalloc(size); \
6232 if (x == NULL) \
6233 goto alloc_mem_err; \
6234 memset(x, 0, size); \
6235 } while (0)
6236
6237 int i;
6238
6239 /* fastpath */
555f6c78 6240 /* Common */
a2fbb9ea
ET
6241 for_each_queue(bp, i) {
6242 bnx2x_fp(bp, i, bp) = bp;
6243
555f6c78 6244 /* status blocks */
a2fbb9ea
ET
6245 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6246 &bnx2x_fp(bp, i, status_blk_mapping),
6247 sizeof(struct host_status_block) +
6248 sizeof(struct eth_tx_db_data));
555f6c78
EG
6249 }
6250 /* Rx */
6251 for_each_rx_queue(bp, i) {
a2fbb9ea 6252
555f6c78 6253 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6254 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6255 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6256 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6257 &bnx2x_fp(bp, i, rx_desc_mapping),
6258 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6259
6260 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6261 &bnx2x_fp(bp, i, rx_comp_mapping),
6262 sizeof(struct eth_fast_path_rx_cqe) *
6263 NUM_RCQ_BD);
6264
7a9b2557
VZ
6265 /* SGE ring */
6266 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6267 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6268 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6269 &bnx2x_fp(bp, i, rx_sge_mapping),
6270 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6271 }
555f6c78
EG
6272 /* Tx */
6273 for_each_tx_queue(bp, i) {
6274
6275 bnx2x_fp(bp, i, hw_tx_prods) =
6276 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6277
6278 bnx2x_fp(bp, i, tx_prods_mapping) =
6279 bnx2x_fp(bp, i, status_blk_mapping) +
6280 sizeof(struct host_status_block);
6281
6282 /* fastpath tx rings: tx_buf tx_desc */
6283 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6284 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6285 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6286 &bnx2x_fp(bp, i, tx_desc_mapping),
6287 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6288 }
a2fbb9ea
ET
6289 /* end of fastpath */
6290
6291 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6292 sizeof(struct host_def_status_block));
6293
6294 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6295 sizeof(struct bnx2x_slowpath));
6296
6297#ifdef BCM_ISCSI
6298 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6299
6300 /* Initialize T1 */
6301 for (i = 0; i < 64*1024; i += 64) {
6302 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6303 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6304 }
6305
6306 /* allocate searcher T2 table
6307 we allocate 1/4 of alloc num for T2
6308 (which is not entered into the ILT) */
6309 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6310
6311 /* Initialize T2 */
6312 for (i = 0; i < 16*1024; i += 64)
6313 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6314
c14423fe 6315 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6316 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6317
6318 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6319 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6320
6321 /* QM queues (128*MAX_CONN) */
6322 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6323#endif
6324
6325 /* Slow path ring */
6326 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6327
6328 return 0;
6329
6330alloc_mem_err:
6331 bnx2x_free_mem(bp);
6332 return -ENOMEM;
6333
6334#undef BNX2X_PCI_ALLOC
6335#undef BNX2X_ALLOC
6336}
6337
6338static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6339{
6340 int i;
6341
555f6c78 6342 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6343 struct bnx2x_fastpath *fp = &bp->fp[i];
6344
6345 u16 bd_cons = fp->tx_bd_cons;
6346 u16 sw_prod = fp->tx_pkt_prod;
6347 u16 sw_cons = fp->tx_pkt_cons;
6348
a2fbb9ea
ET
6349 while (sw_cons != sw_prod) {
6350 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6351 sw_cons++;
6352 }
6353 }
6354}
6355
6356static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6357{
6358 int i, j;
6359
555f6c78 6360 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6361 struct bnx2x_fastpath *fp = &bp->fp[j];
6362
a2fbb9ea
ET
6363 for (i = 0; i < NUM_RX_BD; i++) {
6364 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6365 struct sk_buff *skb = rx_buf->skb;
6366
6367 if (skb == NULL)
6368 continue;
6369
6370 pci_unmap_single(bp->pdev,
6371 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6372 bp->rx_buf_size,
a2fbb9ea
ET
6373 PCI_DMA_FROMDEVICE);
6374
6375 rx_buf->skb = NULL;
6376 dev_kfree_skb(skb);
6377 }
7a9b2557 6378 if (!fp->disable_tpa)
32626230
EG
6379 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6380 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6381 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6382 }
6383}
6384
6385static void bnx2x_free_skbs(struct bnx2x *bp)
6386{
6387 bnx2x_free_tx_skbs(bp);
6388 bnx2x_free_rx_skbs(bp);
6389}
6390
6391static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6392{
34f80b04 6393 int i, offset = 1;
a2fbb9ea
ET
6394
6395 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6396 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6397 bp->msix_table[0].vector);
6398
6399 for_each_queue(bp, i) {
c14423fe 6400 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6401 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6402 bnx2x_fp(bp, i, state));
6403
34f80b04 6404 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6405 }
a2fbb9ea
ET
6406}
6407
6408static void bnx2x_free_irq(struct bnx2x *bp)
6409{
a2fbb9ea 6410 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6411 bnx2x_free_msix_irqs(bp);
6412 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6413 bp->flags &= ~USING_MSIX_FLAG;
6414
8badd27a
EG
6415 } else if (bp->flags & USING_MSI_FLAG) {
6416 free_irq(bp->pdev->irq, bp->dev);
6417 pci_disable_msi(bp->pdev);
6418 bp->flags &= ~USING_MSI_FLAG;
6419
a2fbb9ea
ET
6420 } else
6421 free_irq(bp->pdev->irq, bp->dev);
6422}
6423
6424static int bnx2x_enable_msix(struct bnx2x *bp)
6425{
8badd27a
EG
6426 int i, rc, offset = 1;
6427 int igu_vec = 0;
a2fbb9ea 6428
8badd27a
EG
6429 bp->msix_table[0].entry = igu_vec;
6430 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6431
34f80b04 6432 for_each_queue(bp, i) {
8badd27a 6433 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6434 bp->msix_table[i + offset].entry = igu_vec;
6435 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6436 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6437 }
6438
34f80b04 6439 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6440 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6441 if (rc) {
8badd27a
EG
6442 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6443 return rc;
34f80b04 6444 }
8badd27a 6445
a2fbb9ea
ET
6446 bp->flags |= USING_MSIX_FLAG;
6447
6448 return 0;
a2fbb9ea
ET
6449}
6450
a2fbb9ea
ET
6451static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6452{
34f80b04 6453 int i, rc, offset = 1;
a2fbb9ea 6454
a2fbb9ea
ET
6455 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6456 bp->dev->name, bp->dev);
a2fbb9ea
ET
6457 if (rc) {
6458 BNX2X_ERR("request sp irq failed\n");
6459 return -EBUSY;
6460 }
6461
6462 for_each_queue(bp, i) {
555f6c78
EG
6463 struct bnx2x_fastpath *fp = &bp->fp[i];
6464
6465 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6466 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6467 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6468 if (rc) {
555f6c78 6469 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6470 bnx2x_free_msix_irqs(bp);
6471 return -EBUSY;
6472 }
6473
555f6c78 6474 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6475 }
6476
555f6c78
EG
6477 i = BNX2X_NUM_QUEUES(bp);
6478 if (is_multi(bp))
6479 printk(KERN_INFO PFX
6480 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6481 bp->dev->name, bp->msix_table[0].vector,
6482 bp->msix_table[offset].vector,
6483 bp->msix_table[offset + i - 1].vector);
6484 else
6485 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6486 bp->dev->name, bp->msix_table[0].vector,
6487 bp->msix_table[offset + i - 1].vector);
6488
a2fbb9ea 6489 return 0;
a2fbb9ea
ET
6490}
6491
8badd27a
EG
6492static int bnx2x_enable_msi(struct bnx2x *bp)
6493{
6494 int rc;
6495
6496 rc = pci_enable_msi(bp->pdev);
6497 if (rc) {
6498 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6499 return -1;
6500 }
6501 bp->flags |= USING_MSI_FLAG;
6502
6503 return 0;
6504}
6505
a2fbb9ea
ET
6506static int bnx2x_req_irq(struct bnx2x *bp)
6507{
8badd27a 6508 unsigned long flags;
34f80b04 6509 int rc;
a2fbb9ea 6510
8badd27a
EG
6511 if (bp->flags & USING_MSI_FLAG)
6512 flags = 0;
6513 else
6514 flags = IRQF_SHARED;
6515
6516 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6517 bp->dev->name, bp->dev);
a2fbb9ea
ET
6518 if (!rc)
6519 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6520
6521 return rc;
a2fbb9ea
ET
6522}
6523
65abd74d
YG
6524static void bnx2x_napi_enable(struct bnx2x *bp)
6525{
6526 int i;
6527
555f6c78 6528 for_each_rx_queue(bp, i)
65abd74d
YG
6529 napi_enable(&bnx2x_fp(bp, i, napi));
6530}
6531
6532static void bnx2x_napi_disable(struct bnx2x *bp)
6533{
6534 int i;
6535
555f6c78 6536 for_each_rx_queue(bp, i)
65abd74d
YG
6537 napi_disable(&bnx2x_fp(bp, i, napi));
6538}
6539
6540static void bnx2x_netif_start(struct bnx2x *bp)
6541{
6542 if (atomic_dec_and_test(&bp->intr_sem)) {
6543 if (netif_running(bp->dev)) {
65abd74d
YG
6544 bnx2x_napi_enable(bp);
6545 bnx2x_int_enable(bp);
555f6c78
EG
6546 if (bp->state == BNX2X_STATE_OPEN)
6547 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6548 }
6549 }
6550}
6551
f8ef6e44 6552static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6553{
f8ef6e44 6554 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6555 bnx2x_napi_disable(bp);
65abd74d 6556 if (netif_running(bp->dev)) {
65abd74d
YG
6557 netif_tx_disable(bp->dev);
6558 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6559 }
6560}
6561
a2fbb9ea
ET
6562/*
6563 * Init service functions
6564 */
6565
3101c2bc 6566static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6567{
6568 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6569 int port = BP_PORT(bp);
a2fbb9ea
ET
6570
6571 /* CAM allocation
6572 * unicasts 0-31:port0 32-63:port1
6573 * multicast 64-127:port0 128-191:port1
6574 */
8d9c5f34 6575 config->hdr.length = 2;
af246401 6576 config->hdr.offset = port ? 32 : 0;
34f80b04 6577 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6578 config->hdr.reserved1 = 0;
6579
6580 /* primary MAC */
6581 config->config_table[0].cam_entry.msb_mac_addr =
6582 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6583 config->config_table[0].cam_entry.middle_mac_addr =
6584 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6585 config->config_table[0].cam_entry.lsb_mac_addr =
6586 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6587 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6588 if (set)
6589 config->config_table[0].target_table_entry.flags = 0;
6590 else
6591 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6592 config->config_table[0].target_table_entry.client_id = 0;
6593 config->config_table[0].target_table_entry.vlan_id = 0;
6594
3101c2bc
YG
6595 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6596 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6597 config->config_table[0].cam_entry.msb_mac_addr,
6598 config->config_table[0].cam_entry.middle_mac_addr,
6599 config->config_table[0].cam_entry.lsb_mac_addr);
6600
6601 /* broadcast */
6602 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6603 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6604 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6605 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6606 if (set)
6607 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6608 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6609 else
6610 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6611 config->config_table[1].target_table_entry.client_id = 0;
6612 config->config_table[1].target_table_entry.vlan_id = 0;
6613
6614 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6615 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6616 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6617}
6618
3101c2bc 6619static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6620{
6621 struct mac_configuration_cmd_e1h *config =
6622 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6623
3101c2bc 6624 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04
EG
6625 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6626 return;
6627 }
6628
6629 /* CAM allocation for E1H
6630 * unicasts: by func number
6631 * multicast: 20+FUNC*20, 20 each
6632 */
8d9c5f34 6633 config->hdr.length = 1;
34f80b04
EG
6634 config->hdr.offset = BP_FUNC(bp);
6635 config->hdr.client_id = BP_CL_ID(bp);
6636 config->hdr.reserved1 = 0;
6637
6638 /* primary MAC */
6639 config->config_table[0].msb_mac_addr =
6640 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6641 config->config_table[0].middle_mac_addr =
6642 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6643 config->config_table[0].lsb_mac_addr =
6644 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6645 config->config_table[0].client_id = BP_L_ID(bp);
6646 config->config_table[0].vlan_id = 0;
6647 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6648 if (set)
6649 config->config_table[0].flags = BP_PORT(bp);
6650 else
6651 config->config_table[0].flags =
6652 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6653
3101c2bc
YG
6654 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6655 (set ? "setting" : "clearing"),
34f80b04
EG
6656 config->config_table[0].msb_mac_addr,
6657 config->config_table[0].middle_mac_addr,
6658 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6659
6660 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6661 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6662 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6663}
6664
a2fbb9ea
ET
6665static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6666 int *state_p, int poll)
6667{
6668 /* can take a while if any port is running */
34f80b04 6669 int cnt = 500;
a2fbb9ea 6670
c14423fe
ET
6671 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6672 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6673
6674 might_sleep();
34f80b04 6675 while (cnt--) {
a2fbb9ea
ET
6676 if (poll) {
6677 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6678 /* if index is different from 0
6679 * the reply for some commands will
3101c2bc 6680 * be on the non default queue
a2fbb9ea
ET
6681 */
6682 if (idx)
6683 bnx2x_rx_int(&bp->fp[idx], 10);
6684 }
a2fbb9ea 6685
3101c2bc 6686 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6687 if (*state_p == state)
a2fbb9ea
ET
6688 return 0;
6689
a2fbb9ea 6690 msleep(1);
a2fbb9ea
ET
6691 }
6692
a2fbb9ea 6693 /* timeout! */
49d66772
ET
6694 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6695 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6696#ifdef BNX2X_STOP_ON_ERROR
6697 bnx2x_panic();
6698#endif
a2fbb9ea 6699
49d66772 6700 return -EBUSY;
a2fbb9ea
ET
6701}
6702
6703static int bnx2x_setup_leading(struct bnx2x *bp)
6704{
34f80b04 6705 int rc;
a2fbb9ea 6706
c14423fe 6707 /* reset IGU state */
34f80b04 6708 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6709
6710 /* SETUP ramrod */
6711 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6712
34f80b04
EG
6713 /* Wait for completion */
6714 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6715
34f80b04 6716 return rc;
a2fbb9ea
ET
6717}
6718
6719static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6720{
555f6c78
EG
6721 struct bnx2x_fastpath *fp = &bp->fp[index];
6722
a2fbb9ea 6723 /* reset IGU state */
555f6c78 6724 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6725
228241eb 6726 /* SETUP ramrod */
555f6c78
EG
6727 fp->state = BNX2X_FP_STATE_OPENING;
6728 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6729 fp->cl_id, 0);
a2fbb9ea
ET
6730
6731 /* Wait for completion */
6732 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6733 &(fp->state), 0);
a2fbb9ea
ET
6734}
6735
a2fbb9ea 6736static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6737
8badd27a 6738static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6739{
555f6c78 6740 int num_queues;
a2fbb9ea 6741
8badd27a
EG
6742 switch (int_mode) {
6743 case INT_MODE_INTx:
6744 case INT_MODE_MSI:
555f6c78
EG
6745 num_queues = 1;
6746 bp->num_rx_queues = num_queues;
6747 bp->num_tx_queues = num_queues;
6748 DP(NETIF_MSG_IFUP,
6749 "set number of queues to %d\n", num_queues);
8badd27a
EG
6750 break;
6751
6752 case INT_MODE_MSIX:
6753 default:
555f6c78
EG
6754 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6755 num_queues = min_t(u32, num_online_cpus(),
6756 BNX2X_MAX_QUEUES(bp));
34f80b04 6757 else
555f6c78
EG
6758 num_queues = 1;
6759 bp->num_rx_queues = num_queues;
6760 bp->num_tx_queues = num_queues;
6761 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6762 " number of tx queues to %d\n",
6763 bp->num_rx_queues, bp->num_tx_queues);
2dfe0e1f
EG
6764 /* if we can't use MSI-X we only need one fp,
6765 * so try to enable MSI-X with the requested number of fp's
6766 * and fallback to MSI or legacy INTx with one fp
6767 */
8badd27a 6768 if (bnx2x_enable_msix(bp)) {
34f80b04 6769 /* failed to enable MSI-X */
555f6c78
EG
6770 num_queues = 1;
6771 bp->num_rx_queues = num_queues;
6772 bp->num_tx_queues = num_queues;
6773 if (bp->multi_mode)
6774 BNX2X_ERR("Multi requested but failed to "
6775 "enable MSI-X set number of "
6776 "queues to %d\n", num_queues);
a2fbb9ea 6777 }
8badd27a 6778 break;
a2fbb9ea 6779 }
555f6c78 6780 bp->dev->real_num_tx_queues = bp->num_tx_queues;
8badd27a
EG
6781}
6782
6783static void bnx2x_set_rx_mode(struct net_device *dev);
6784
6785/* must be called with rtnl_lock */
6786static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6787{
6788 u32 load_code;
6789 int i, rc = 0;
6790#ifdef BNX2X_STOP_ON_ERROR
6791 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6792 if (unlikely(bp->panic))
6793 return -EPERM;
6794#endif
6795
6796 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6797
6798 bnx2x_set_int_mode(bp);
c14423fe 6799
a2fbb9ea
ET
6800 if (bnx2x_alloc_mem(bp))
6801 return -ENOMEM;
6802
555f6c78 6803 for_each_rx_queue(bp, i)
7a9b2557
VZ
6804 bnx2x_fp(bp, i, disable_tpa) =
6805 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6806
555f6c78 6807 for_each_rx_queue(bp, i)
2dfe0e1f
EG
6808 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6809 bnx2x_poll, 128);
6810
6811#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6812 for_each_rx_queue(bp, i) {
2dfe0e1f
EG
6813 struct bnx2x_fastpath *fp = &bp->fp[i];
6814
6815 fp->poll_no_work = 0;
6816 fp->poll_calls = 0;
6817 fp->poll_max_calls = 0;
6818 fp->poll_complete = 0;
6819 fp->poll_exit = 0;
6820 }
6821#endif
6822 bnx2x_napi_enable(bp);
6823
34f80b04
EG
6824 if (bp->flags & USING_MSIX_FLAG) {
6825 rc = bnx2x_req_msix_irqs(bp);
6826 if (rc) {
6827 pci_disable_msix(bp->pdev);
2dfe0e1f 6828 goto load_error1;
34f80b04
EG
6829 }
6830 } else {
8badd27a
EG
6831 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6832 bnx2x_enable_msi(bp);
34f80b04
EG
6833 bnx2x_ack_int(bp);
6834 rc = bnx2x_req_irq(bp);
6835 if (rc) {
2dfe0e1f 6836 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
6837 if (bp->flags & USING_MSI_FLAG)
6838 pci_disable_msi(bp->pdev);
2dfe0e1f 6839 goto load_error1;
a2fbb9ea 6840 }
8badd27a
EG
6841 if (bp->flags & USING_MSI_FLAG) {
6842 bp->dev->irq = bp->pdev->irq;
6843 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6844 bp->dev->name, bp->pdev->irq);
6845 }
a2fbb9ea
ET
6846 }
6847
2dfe0e1f
EG
6848 /* Send LOAD_REQUEST command to MCP
6849 Returns the type of LOAD command:
6850 if it is the first port to be initialized
6851 common blocks should be initialized, otherwise - not
6852 */
6853 if (!BP_NOMCP(bp)) {
6854 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6855 if (!load_code) {
6856 BNX2X_ERR("MCP response failure, aborting\n");
6857 rc = -EBUSY;
6858 goto load_error2;
6859 }
6860 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6861 rc = -EBUSY; /* other port in diagnostic mode */
6862 goto load_error2;
6863 }
6864
6865 } else {
6866 int port = BP_PORT(bp);
6867
6868 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6869 load_count[0], load_count[1], load_count[2]);
6870 load_count[0]++;
6871 load_count[1 + port]++;
6872 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6873 load_count[0], load_count[1], load_count[2]);
6874 if (load_count[0] == 1)
6875 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6876 else if (load_count[1 + port] == 1)
6877 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6878 else
6879 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6880 }
6881
6882 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6883 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6884 bp->port.pmf = 1;
6885 else
6886 bp->port.pmf = 0;
6887 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6888
a2fbb9ea 6889 /* Initialize HW */
34f80b04
EG
6890 rc = bnx2x_init_hw(bp, load_code);
6891 if (rc) {
a2fbb9ea 6892 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6893 goto load_error2;
a2fbb9ea
ET
6894 }
6895
a2fbb9ea 6896 /* Setup NIC internals and enable interrupts */
471de716 6897 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6898
6899 /* Send LOAD_DONE command to MCP */
34f80b04 6900 if (!BP_NOMCP(bp)) {
228241eb
ET
6901 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6902 if (!load_code) {
da5a662a 6903 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6904 rc = -EBUSY;
2dfe0e1f 6905 goto load_error3;
a2fbb9ea
ET
6906 }
6907 }
6908
6909 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6910
34f80b04
EG
6911 rc = bnx2x_setup_leading(bp);
6912 if (rc) {
da5a662a 6913 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6914 goto load_error3;
34f80b04 6915 }
a2fbb9ea 6916
34f80b04
EG
6917 if (CHIP_IS_E1H(bp))
6918 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6919 BNX2X_ERR("!!! mf_cfg function disabled\n");
6920 bp->state = BNX2X_STATE_DISABLED;
6921 }
a2fbb9ea 6922
34f80b04
EG
6923 if (bp->state == BNX2X_STATE_OPEN)
6924 for_each_nondefault_queue(bp, i) {
6925 rc = bnx2x_setup_multi(bp, i);
6926 if (rc)
2dfe0e1f 6927 goto load_error3;
34f80b04 6928 }
a2fbb9ea 6929
34f80b04 6930 if (CHIP_IS_E1(bp))
3101c2bc 6931 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6932 else
3101c2bc 6933 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6934
6935 if (bp->port.pmf)
6936 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6937
6938 /* Start fast path */
34f80b04
EG
6939 switch (load_mode) {
6940 case LOAD_NORMAL:
6941 /* Tx queue should be only reenabled */
555f6c78 6942 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6943 /* Initialize the receive filter. */
34f80b04
EG
6944 bnx2x_set_rx_mode(bp->dev);
6945 break;
6946
6947 case LOAD_OPEN:
555f6c78 6948 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6949 /* Initialize the receive filter. */
34f80b04 6950 bnx2x_set_rx_mode(bp->dev);
34f80b04 6951 break;
a2fbb9ea 6952
34f80b04 6953 case LOAD_DIAG:
2dfe0e1f 6954 /* Initialize the receive filter. */
a2fbb9ea 6955 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6956 bp->state = BNX2X_STATE_DIAG;
6957 break;
6958
6959 default:
6960 break;
a2fbb9ea
ET
6961 }
6962
34f80b04
EG
6963 if (!bp->port.pmf)
6964 bnx2x__link_status_update(bp);
6965
a2fbb9ea
ET
6966 /* start the timer */
6967 mod_timer(&bp->timer, jiffies + bp->current_interval);
6968
34f80b04 6969
a2fbb9ea
ET
6970 return 0;
6971
2dfe0e1f
EG
6972load_error3:
6973 bnx2x_int_disable_sync(bp, 1);
6974 if (!BP_NOMCP(bp)) {
6975 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6976 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6977 }
6978 bp->port.pmf = 0;
7a9b2557
VZ
6979 /* Free SKBs, SGEs, TPA pool and driver internals */
6980 bnx2x_free_skbs(bp);
555f6c78 6981 for_each_rx_queue(bp, i)
3196a88a 6982 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6983load_error2:
d1014634
YG
6984 /* Release IRQs */
6985 bnx2x_free_irq(bp);
2dfe0e1f
EG
6986load_error1:
6987 bnx2x_napi_disable(bp);
555f6c78 6988 for_each_rx_queue(bp, i)
7cde1c8b 6989 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6990 bnx2x_free_mem(bp);
6991
6992 /* TBD we really need to reset the chip
6993 if we want to recover from this */
34f80b04 6994 return rc;
a2fbb9ea
ET
6995}
6996
6997static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6998{
555f6c78 6999 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7000 int rc;
7001
c14423fe 7002 /* halt the connection */
555f6c78
EG
7003 fp->state = BNX2X_FP_STATE_HALTING;
7004 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7005
34f80b04 7006 /* Wait for completion */
a2fbb9ea 7007 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7008 &(fp->state), 1);
c14423fe 7009 if (rc) /* timeout */
a2fbb9ea
ET
7010 return rc;
7011
7012 /* delete cfc entry */
7013 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7014
34f80b04
EG
7015 /* Wait for completion */
7016 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7017 &(fp->state), 1);
34f80b04 7018 return rc;
a2fbb9ea
ET
7019}
7020
da5a662a 7021static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7022{
49d66772 7023 u16 dsb_sp_prod_idx;
c14423fe 7024 /* if the other port is handling traffic,
a2fbb9ea 7025 this can take a lot of time */
34f80b04
EG
7026 int cnt = 500;
7027 int rc;
a2fbb9ea
ET
7028
7029 might_sleep();
7030
7031 /* Send HALT ramrod */
7032 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 7033 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 7034
34f80b04
EG
7035 /* Wait for completion */
7036 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7037 &(bp->fp[0].state), 1);
7038 if (rc) /* timeout */
da5a662a 7039 return rc;
a2fbb9ea 7040
49d66772 7041 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7042
228241eb 7043 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7044 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7045
49d66772 7046 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7047 we are going to reset the chip anyway
7048 so there is not much to do if this times out
7049 */
34f80b04 7050 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7051 if (!cnt) {
7052 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7053 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7054 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7055#ifdef BNX2X_STOP_ON_ERROR
7056 bnx2x_panic();
da5a662a
VZ
7057#else
7058 rc = -EBUSY;
34f80b04
EG
7059#endif
7060 break;
7061 }
7062 cnt--;
da5a662a 7063 msleep(1);
5650d9d4 7064 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7065 }
7066 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7067 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7068
7069 return rc;
a2fbb9ea
ET
7070}
7071
34f80b04
EG
7072static void bnx2x_reset_func(struct bnx2x *bp)
7073{
7074 int port = BP_PORT(bp);
7075 int func = BP_FUNC(bp);
7076 int base, i;
7077
7078 /* Configure IGU */
7079 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7080 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7081
34f80b04
EG
7082 /* Clear ILT */
7083 base = FUNC_ILT_BASE(func);
7084 for (i = base; i < base + ILT_PER_FUNC; i++)
7085 bnx2x_ilt_wr(bp, i, 0);
7086}
7087
7088static void bnx2x_reset_port(struct bnx2x *bp)
7089{
7090 int port = BP_PORT(bp);
7091 u32 val;
7092
7093 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7094
7095 /* Do not rcv packets to BRB */
7096 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7097 /* Do not direct rcv packets that are not for MCP to the BRB */
7098 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7099 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7100
7101 /* Configure AEU */
7102 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7103
7104 msleep(100);
7105 /* Check for BRB port occupancy */
7106 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7107 if (val)
7108 DP(NETIF_MSG_IFDOWN,
33471629 7109 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7110
7111 /* TODO: Close Doorbell port? */
7112}
7113
34f80b04
EG
7114static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7115{
7116 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7117 BP_FUNC(bp), reset_code);
7118
7119 switch (reset_code) {
7120 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7121 bnx2x_reset_port(bp);
7122 bnx2x_reset_func(bp);
7123 bnx2x_reset_common(bp);
7124 break;
7125
7126 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7127 bnx2x_reset_port(bp);
7128 bnx2x_reset_func(bp);
7129 break;
7130
7131 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7132 bnx2x_reset_func(bp);
7133 break;
49d66772 7134
34f80b04
EG
7135 default:
7136 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7137 break;
7138 }
7139}
7140
33471629 7141/* must be called with rtnl_lock */
34f80b04 7142static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7143{
da5a662a 7144 int port = BP_PORT(bp);
a2fbb9ea 7145 u32 reset_code = 0;
da5a662a 7146 int i, cnt, rc;
a2fbb9ea
ET
7147
7148 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7149
228241eb
ET
7150 bp->rx_mode = BNX2X_RX_MODE_NONE;
7151 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7152
f8ef6e44 7153 bnx2x_netif_stop(bp, 1);
e94d8af3 7154
34f80b04
EG
7155 del_timer_sync(&bp->timer);
7156 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7157 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7158 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7159
70b9986c
EG
7160 /* Release IRQs */
7161 bnx2x_free_irq(bp);
7162
555f6c78
EG
7163 /* Wait until tx fastpath tasks complete */
7164 for_each_tx_queue(bp, i) {
228241eb
ET
7165 struct bnx2x_fastpath *fp = &bp->fp[i];
7166
34f80b04
EG
7167 cnt = 1000;
7168 smp_rmb();
e8b5fc51 7169 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7170
65abd74d 7171 bnx2x_tx_int(fp, 1000);
34f80b04
EG
7172 if (!cnt) {
7173 BNX2X_ERR("timeout waiting for queue[%d]\n",
7174 i);
7175#ifdef BNX2X_STOP_ON_ERROR
7176 bnx2x_panic();
7177 return -EBUSY;
7178#else
7179 break;
7180#endif
7181 }
7182 cnt--;
da5a662a 7183 msleep(1);
34f80b04
EG
7184 smp_rmb();
7185 }
228241eb 7186 }
da5a662a
VZ
7187 /* Give HW time to discard old tx messages */
7188 msleep(1);
a2fbb9ea 7189
3101c2bc
YG
7190 if (CHIP_IS_E1(bp)) {
7191 struct mac_configuration_cmd *config =
7192 bnx2x_sp(bp, mcast_config);
7193
7194 bnx2x_set_mac_addr_e1(bp, 0);
7195
8d9c5f34 7196 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7197 CAM_INVALIDATE(config->config_table[i]);
7198
8d9c5f34 7199 config->hdr.length = i;
3101c2bc
YG
7200 if (CHIP_REV_IS_SLOW(bp))
7201 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7202 else
7203 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7204 config->hdr.client_id = BP_CL_ID(bp);
7205 config->hdr.reserved1 = 0;
7206
7207 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7208 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7209 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7210
7211 } else { /* E1H */
65abd74d
YG
7212 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7213
3101c2bc
YG
7214 bnx2x_set_mac_addr_e1h(bp, 0);
7215
7216 for (i = 0; i < MC_HASH_SIZE; i++)
7217 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7218 }
7219
65abd74d
YG
7220 if (unload_mode == UNLOAD_NORMAL)
7221 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7222
7223 else if (bp->flags & NO_WOL_FLAG) {
7224 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7225 if (CHIP_IS_E1H(bp))
7226 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7227
7228 } else if (bp->wol) {
7229 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7230 u8 *mac_addr = bp->dev->dev_addr;
7231 u32 val;
7232 /* The mac address is written to entries 1-4 to
7233 preserve entry 0 which is used by the PMF */
7234 u8 entry = (BP_E1HVN(bp) + 1)*8;
7235
7236 val = (mac_addr[0] << 8) | mac_addr[1];
7237 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7238
7239 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7240 (mac_addr[4] << 8) | mac_addr[5];
7241 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7242
7243 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7244
7245 } else
7246 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7247
34f80b04
EG
7248 /* Close multi and leading connections
7249 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7250 for_each_nondefault_queue(bp, i)
7251 if (bnx2x_stop_multi(bp, i))
228241eb 7252 goto unload_error;
a2fbb9ea 7253
da5a662a
VZ
7254 rc = bnx2x_stop_leading(bp);
7255 if (rc) {
34f80b04 7256 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7257#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7258 return -EBUSY;
da5a662a
VZ
7259#else
7260 goto unload_error;
34f80b04 7261#endif
228241eb
ET
7262 }
7263
7264unload_error:
34f80b04 7265 if (!BP_NOMCP(bp))
228241eb 7266 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7267 else {
7268 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7269 load_count[0], load_count[1], load_count[2]);
7270 load_count[0]--;
da5a662a 7271 load_count[1 + port]--;
34f80b04
EG
7272 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7273 load_count[0], load_count[1], load_count[2]);
7274 if (load_count[0] == 0)
7275 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7276 else if (load_count[1 + port] == 0)
34f80b04
EG
7277 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7278 else
7279 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7280 }
a2fbb9ea 7281
34f80b04
EG
7282 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7283 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7284 bnx2x__link_reset(bp);
a2fbb9ea
ET
7285
7286 /* Reset the chip */
228241eb 7287 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7288
7289 /* Report UNLOAD_DONE to MCP */
34f80b04 7290 if (!BP_NOMCP(bp))
a2fbb9ea 7291 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 7292 bp->port.pmf = 0;
a2fbb9ea 7293
7a9b2557 7294 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7295 bnx2x_free_skbs(bp);
555f6c78 7296 for_each_rx_queue(bp, i)
3196a88a 7297 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7298 for_each_rx_queue(bp, i)
7cde1c8b 7299 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7300 bnx2x_free_mem(bp);
7301
7302 bp->state = BNX2X_STATE_CLOSED;
228241eb 7303
a2fbb9ea
ET
7304 netif_carrier_off(bp->dev);
7305
7306 return 0;
7307}
7308
34f80b04
EG
7309static void bnx2x_reset_task(struct work_struct *work)
7310{
7311 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7312
7313#ifdef BNX2X_STOP_ON_ERROR
7314 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7315 " so reset not done to allow debug dump,\n"
7316 KERN_ERR " you will need to reboot when done\n");
7317 return;
7318#endif
7319
7320 rtnl_lock();
7321
7322 if (!netif_running(bp->dev))
7323 goto reset_task_exit;
7324
7325 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7326 bnx2x_nic_load(bp, LOAD_NORMAL);
7327
7328reset_task_exit:
7329 rtnl_unlock();
7330}
7331
a2fbb9ea
ET
7332/* end of nic load/unload */
7333
7334/* ethtool_ops */
7335
7336/*
7337 * Init service functions
7338 */
7339
f1ef27ef
EG
7340static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7341{
7342 switch (func) {
7343 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7344 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7345 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7346 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7347 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7348 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7349 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7350 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7351 default:
7352 BNX2X_ERR("Unsupported function index: %d\n", func);
7353 return (u32)(-1);
7354 }
7355}
7356
7357static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7358{
7359 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7360
7361 /* Flush all outstanding writes */
7362 mmiowb();
7363
7364 /* Pretend to be function 0 */
7365 REG_WR(bp, reg, 0);
7366 /* Flush the GRC transaction (in the chip) */
7367 new_val = REG_RD(bp, reg);
7368 if (new_val != 0) {
7369 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7370 new_val);
7371 BUG();
7372 }
7373
7374 /* From now we are in the "like-E1" mode */
7375 bnx2x_int_disable(bp);
7376
7377 /* Flush all outstanding writes */
7378 mmiowb();
7379
7380 /* Restore the original funtion settings */
7381 REG_WR(bp, reg, orig_func);
7382 new_val = REG_RD(bp, reg);
7383 if (new_val != orig_func) {
7384 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7385 orig_func, new_val);
7386 BUG();
7387 }
7388}
7389
7390static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7391{
7392 if (CHIP_IS_E1H(bp))
7393 bnx2x_undi_int_disable_e1h(bp, func);
7394 else
7395 bnx2x_int_disable(bp);
7396}
7397
34f80b04
EG
7398static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7399{
7400 u32 val;
7401
7402 /* Check if there is any driver already loaded */
7403 val = REG_RD(bp, MISC_REG_UNPREPARED);
7404 if (val == 0x1) {
7405 /* Check if it is the UNDI driver
7406 * UNDI driver initializes CID offset for normal bell to 0x7
7407 */
4a37fb66 7408 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7409 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7410 if (val == 0x7) {
7411 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7412 /* save our func */
34f80b04 7413 int func = BP_FUNC(bp);
da5a662a
VZ
7414 u32 swap_en;
7415 u32 swap_val;
34f80b04 7416
b4661739
EG
7417 /* clear the UNDI indication */
7418 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7419
34f80b04
EG
7420 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7421
7422 /* try unload UNDI on port 0 */
7423 bp->func = 0;
da5a662a
VZ
7424 bp->fw_seq =
7425 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7426 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7427 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7428
7429 /* if UNDI is loaded on the other port */
7430 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7431
da5a662a
VZ
7432 /* send "DONE" for previous unload */
7433 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7434
7435 /* unload UNDI on port 1 */
34f80b04 7436 bp->func = 1;
da5a662a
VZ
7437 bp->fw_seq =
7438 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7439 DRV_MSG_SEQ_NUMBER_MASK);
7440 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7441
7442 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7443 }
7444
b4661739
EG
7445 /* now it's safe to release the lock */
7446 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7447
f1ef27ef 7448 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7449
7450 /* close input traffic and wait for it */
7451 /* Do not rcv packets to BRB */
7452 REG_WR(bp,
7453 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7454 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7455 /* Do not direct rcv packets that are not for MCP to
7456 * the BRB */
7457 REG_WR(bp,
7458 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7459 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7460 /* clear AEU */
7461 REG_WR(bp,
7462 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7463 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7464 msleep(10);
7465
7466 /* save NIG port swap info */
7467 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7468 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7469 /* reset device */
7470 REG_WR(bp,
7471 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7472 0xd3ffffff);
34f80b04
EG
7473 REG_WR(bp,
7474 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7475 0x1403);
da5a662a
VZ
7476 /* take the NIG out of reset and restore swap values */
7477 REG_WR(bp,
7478 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7479 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7480 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7481 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7482
7483 /* send unload done to the MCP */
7484 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7485
7486 /* restore our func and fw_seq */
7487 bp->func = func;
7488 bp->fw_seq =
7489 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7490 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7491
7492 } else
7493 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7494 }
7495}
7496
7497static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7498{
7499 u32 val, val2, val3, val4, id;
72ce58c3 7500 u16 pmc;
34f80b04
EG
7501
7502 /* Get the chip revision id and number. */
7503 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7504 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7505 id = ((val & 0xffff) << 16);
7506 val = REG_RD(bp, MISC_REG_CHIP_REV);
7507 id |= ((val & 0xf) << 12);
7508 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7509 id |= ((val & 0xff) << 4);
5a40e08e 7510 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7511 id |= (val & 0xf);
7512 bp->common.chip_id = id;
7513 bp->link_params.chip_id = bp->common.chip_id;
7514 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7515
1c06328c
EG
7516 val = (REG_RD(bp, 0x2874) & 0x55);
7517 if ((bp->common.chip_id & 0x1) ||
7518 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7519 bp->flags |= ONE_PORT_FLAG;
7520 BNX2X_DEV_INFO("single port device\n");
7521 }
7522
34f80b04
EG
7523 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7524 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7525 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7526 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7527 bp->common.flash_size, bp->common.flash_size);
7528
7529 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7530 bp->link_params.shmem_base = bp->common.shmem_base;
7531 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7532
7533 if (!bp->common.shmem_base ||
7534 (bp->common.shmem_base < 0xA0000) ||
7535 (bp->common.shmem_base >= 0xC0000)) {
7536 BNX2X_DEV_INFO("MCP not active\n");
7537 bp->flags |= NO_MCP_FLAG;
7538 return;
7539 }
7540
7541 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7542 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7543 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7544 BNX2X_ERR("BAD MCP validity signature\n");
7545
7546 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7547 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7548
7549 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7550 SHARED_HW_CFG_LED_MODE_MASK) >>
7551 SHARED_HW_CFG_LED_MODE_SHIFT);
7552
7553 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7554 bp->common.bc_ver = val;
7555 BNX2X_DEV_INFO("bc_ver %X\n", val);
7556 if (val < BNX2X_BC_VER) {
7557 /* for now only warn
7558 * later we might need to enforce this */
7559 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7560 " please upgrade BC\n", BNX2X_BC_VER, val);
7561 }
72ce58c3
EG
7562
7563 if (BP_E1HVN(bp) == 0) {
7564 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7565 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7566 } else {
7567 /* no WOL capability for E1HVN != 0 */
7568 bp->flags |= NO_WOL_FLAG;
7569 }
7570 BNX2X_DEV_INFO("%sWoL capable\n",
7571 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7572
7573 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7574 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7575 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7576 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7577
7578 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7579 val, val2, val3, val4);
7580}
7581
7582static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7583 u32 switch_cfg)
a2fbb9ea 7584{
34f80b04 7585 int port = BP_PORT(bp);
a2fbb9ea
ET
7586 u32 ext_phy_type;
7587
a2fbb9ea
ET
7588 switch (switch_cfg) {
7589 case SWITCH_CFG_1G:
7590 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7591
c18487ee
YR
7592 ext_phy_type =
7593 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7594 switch (ext_phy_type) {
7595 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7596 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7597 ext_phy_type);
7598
34f80b04
EG
7599 bp->port.supported |= (SUPPORTED_10baseT_Half |
7600 SUPPORTED_10baseT_Full |
7601 SUPPORTED_100baseT_Half |
7602 SUPPORTED_100baseT_Full |
7603 SUPPORTED_1000baseT_Full |
7604 SUPPORTED_2500baseX_Full |
7605 SUPPORTED_TP |
7606 SUPPORTED_FIBRE |
7607 SUPPORTED_Autoneg |
7608 SUPPORTED_Pause |
7609 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7610 break;
7611
7612 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7613 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7614 ext_phy_type);
7615
34f80b04
EG
7616 bp->port.supported |= (SUPPORTED_10baseT_Half |
7617 SUPPORTED_10baseT_Full |
7618 SUPPORTED_100baseT_Half |
7619 SUPPORTED_100baseT_Full |
7620 SUPPORTED_1000baseT_Full |
7621 SUPPORTED_TP |
7622 SUPPORTED_FIBRE |
7623 SUPPORTED_Autoneg |
7624 SUPPORTED_Pause |
7625 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7626 break;
7627
7628 default:
7629 BNX2X_ERR("NVRAM config error. "
7630 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7631 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7632 return;
7633 }
7634
34f80b04
EG
7635 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7636 port*0x10);
7637 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7638 break;
7639
7640 case SWITCH_CFG_10G:
7641 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7642
c18487ee
YR
7643 ext_phy_type =
7644 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7645 switch (ext_phy_type) {
7646 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7647 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7648 ext_phy_type);
7649
34f80b04
EG
7650 bp->port.supported |= (SUPPORTED_10baseT_Half |
7651 SUPPORTED_10baseT_Full |
7652 SUPPORTED_100baseT_Half |
7653 SUPPORTED_100baseT_Full |
7654 SUPPORTED_1000baseT_Full |
7655 SUPPORTED_2500baseX_Full |
7656 SUPPORTED_10000baseT_Full |
7657 SUPPORTED_TP |
7658 SUPPORTED_FIBRE |
7659 SUPPORTED_Autoneg |
7660 SUPPORTED_Pause |
7661 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7662 break;
7663
589abe3a
EG
7664 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7665 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 7666 ext_phy_type);
f1410647 7667
34f80b04 7668 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7669 SUPPORTED_1000baseT_Full |
34f80b04 7670 SUPPORTED_FIBRE |
589abe3a 7671 SUPPORTED_Autoneg |
34f80b04
EG
7672 SUPPORTED_Pause |
7673 SUPPORTED_Asym_Pause);
f1410647
ET
7674 break;
7675
589abe3a
EG
7676 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7677 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
7678 ext_phy_type);
7679
34f80b04 7680 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7681 SUPPORTED_2500baseX_Full |
34f80b04 7682 SUPPORTED_1000baseT_Full |
589abe3a
EG
7683 SUPPORTED_FIBRE |
7684 SUPPORTED_Autoneg |
7685 SUPPORTED_Pause |
7686 SUPPORTED_Asym_Pause);
7687 break;
7688
7689 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7690 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7691 ext_phy_type);
7692
7693 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
7694 SUPPORTED_FIBRE |
7695 SUPPORTED_Pause |
7696 SUPPORTED_Asym_Pause);
f1410647
ET
7697 break;
7698
589abe3a
EG
7699 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7700 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
7701 ext_phy_type);
7702
34f80b04
EG
7703 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7704 SUPPORTED_1000baseT_Full |
7705 SUPPORTED_FIBRE |
34f80b04
EG
7706 SUPPORTED_Pause |
7707 SUPPORTED_Asym_Pause);
f1410647
ET
7708 break;
7709
589abe3a
EG
7710 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7711 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
7712 ext_phy_type);
7713
34f80b04 7714 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7715 SUPPORTED_1000baseT_Full |
34f80b04 7716 SUPPORTED_Autoneg |
589abe3a 7717 SUPPORTED_FIBRE |
34f80b04
EG
7718 SUPPORTED_Pause |
7719 SUPPORTED_Asym_Pause);
c18487ee
YR
7720 break;
7721
f1410647
ET
7722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7723 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7724 ext_phy_type);
7725
34f80b04
EG
7726 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7727 SUPPORTED_TP |
7728 SUPPORTED_Autoneg |
7729 SUPPORTED_Pause |
7730 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7731 break;
7732
c18487ee
YR
7733 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7734 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7735 bp->link_params.ext_phy_config);
7736 break;
7737
a2fbb9ea
ET
7738 default:
7739 BNX2X_ERR("NVRAM config error. "
7740 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7741 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7742 return;
7743 }
7744
34f80b04
EG
7745 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7746 port*0x18);
7747 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7748
a2fbb9ea
ET
7749 break;
7750
7751 default:
7752 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7753 bp->port.link_config);
a2fbb9ea
ET
7754 return;
7755 }
34f80b04 7756 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7757
7758 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7759 if (!(bp->link_params.speed_cap_mask &
7760 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7761 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7762
c18487ee
YR
7763 if (!(bp->link_params.speed_cap_mask &
7764 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7765 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7766
c18487ee
YR
7767 if (!(bp->link_params.speed_cap_mask &
7768 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7769 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7770
c18487ee
YR
7771 if (!(bp->link_params.speed_cap_mask &
7772 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7773 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7774
c18487ee
YR
7775 if (!(bp->link_params.speed_cap_mask &
7776 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7777 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7778 SUPPORTED_1000baseT_Full);
a2fbb9ea 7779
c18487ee
YR
7780 if (!(bp->link_params.speed_cap_mask &
7781 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7782 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7783
c18487ee
YR
7784 if (!(bp->link_params.speed_cap_mask &
7785 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7786 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7787
34f80b04 7788 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7789}
7790
34f80b04 7791static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7792{
c18487ee 7793 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7794
34f80b04 7795 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7796 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7797 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7798 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7799 bp->port.advertising = bp->port.supported;
a2fbb9ea 7800 } else {
c18487ee
YR
7801 u32 ext_phy_type =
7802 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7803
7804 if ((ext_phy_type ==
7805 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7806 (ext_phy_type ==
7807 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7808 /* force 10G, no AN */
c18487ee 7809 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7810 bp->port.advertising =
a2fbb9ea
ET
7811 (ADVERTISED_10000baseT_Full |
7812 ADVERTISED_FIBRE);
7813 break;
7814 }
7815 BNX2X_ERR("NVRAM config error. "
7816 "Invalid link_config 0x%x"
7817 " Autoneg not supported\n",
34f80b04 7818 bp->port.link_config);
a2fbb9ea
ET
7819 return;
7820 }
7821 break;
7822
7823 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7824 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7825 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7826 bp->port.advertising = (ADVERTISED_10baseT_Full |
7827 ADVERTISED_TP);
a2fbb9ea
ET
7828 } else {
7829 BNX2X_ERR("NVRAM config error. "
7830 "Invalid link_config 0x%x"
7831 " speed_cap_mask 0x%x\n",
34f80b04 7832 bp->port.link_config,
c18487ee 7833 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7834 return;
7835 }
7836 break;
7837
7838 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7839 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7840 bp->link_params.req_line_speed = SPEED_10;
7841 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7842 bp->port.advertising = (ADVERTISED_10baseT_Half |
7843 ADVERTISED_TP);
a2fbb9ea
ET
7844 } else {
7845 BNX2X_ERR("NVRAM config error. "
7846 "Invalid link_config 0x%x"
7847 " speed_cap_mask 0x%x\n",
34f80b04 7848 bp->port.link_config,
c18487ee 7849 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7850 return;
7851 }
7852 break;
7853
7854 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7855 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7856 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7857 bp->port.advertising = (ADVERTISED_100baseT_Full |
7858 ADVERTISED_TP);
a2fbb9ea
ET
7859 } else {
7860 BNX2X_ERR("NVRAM config error. "
7861 "Invalid link_config 0x%x"
7862 " speed_cap_mask 0x%x\n",
34f80b04 7863 bp->port.link_config,
c18487ee 7864 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7865 return;
7866 }
7867 break;
7868
7869 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7870 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7871 bp->link_params.req_line_speed = SPEED_100;
7872 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7873 bp->port.advertising = (ADVERTISED_100baseT_Half |
7874 ADVERTISED_TP);
a2fbb9ea
ET
7875 } else {
7876 BNX2X_ERR("NVRAM config error. "
7877 "Invalid link_config 0x%x"
7878 " speed_cap_mask 0x%x\n",
34f80b04 7879 bp->port.link_config,
c18487ee 7880 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7881 return;
7882 }
7883 break;
7884
7885 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7886 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7887 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7888 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7889 ADVERTISED_TP);
a2fbb9ea
ET
7890 } else {
7891 BNX2X_ERR("NVRAM config error. "
7892 "Invalid link_config 0x%x"
7893 " speed_cap_mask 0x%x\n",
34f80b04 7894 bp->port.link_config,
c18487ee 7895 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7896 return;
7897 }
7898 break;
7899
7900 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7901 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7902 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7903 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7904 ADVERTISED_TP);
a2fbb9ea
ET
7905 } else {
7906 BNX2X_ERR("NVRAM config error. "
7907 "Invalid link_config 0x%x"
7908 " speed_cap_mask 0x%x\n",
34f80b04 7909 bp->port.link_config,
c18487ee 7910 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7911 return;
7912 }
7913 break;
7914
7915 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7916 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7917 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7918 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7919 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7920 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7921 ADVERTISED_FIBRE);
a2fbb9ea
ET
7922 } else {
7923 BNX2X_ERR("NVRAM config error. "
7924 "Invalid link_config 0x%x"
7925 " speed_cap_mask 0x%x\n",
34f80b04 7926 bp->port.link_config,
c18487ee 7927 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7928 return;
7929 }
7930 break;
7931
7932 default:
7933 BNX2X_ERR("NVRAM config error. "
7934 "BAD link speed link_config 0x%x\n",
34f80b04 7935 bp->port.link_config);
c18487ee 7936 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7937 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7938 break;
7939 }
a2fbb9ea 7940
34f80b04
EG
7941 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7942 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7943 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7944 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7945 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7946
c18487ee 7947 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7948 " advertising 0x%x\n",
c18487ee
YR
7949 bp->link_params.req_line_speed,
7950 bp->link_params.req_duplex,
34f80b04 7951 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7952}
7953
34f80b04 7954static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7955{
34f80b04
EG
7956 int port = BP_PORT(bp);
7957 u32 val, val2;
589abe3a 7958 u32 config;
a2fbb9ea 7959
c18487ee 7960 bp->link_params.bp = bp;
34f80b04 7961 bp->link_params.port = port;
c18487ee 7962
c18487ee 7963 bp->link_params.serdes_config =
f1410647 7964 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7965 bp->link_params.lane_config =
a2fbb9ea 7966 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7967 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7968 SHMEM_RD(bp,
7969 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7970 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7971 SHMEM_RD(bp,
7972 dev_info.port_hw_config[port].speed_capability_mask);
7973
34f80b04 7974 bp->port.link_config =
a2fbb9ea
ET
7975 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7976
589abe3a
EG
7977 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
7978 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
7979 bp->link_params.feature_config_flags |=
7980 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
7981 else
7982 bp->link_params.feature_config_flags &=
7983 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
7984
34f80b04
EG
7985 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7986 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7987 " link_config 0x%08x\n",
c18487ee
YR
7988 bp->link_params.serdes_config,
7989 bp->link_params.lane_config,
7990 bp->link_params.ext_phy_config,
34f80b04 7991 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7992
34f80b04 7993 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7994 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7995 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7996
7997 bnx2x_link_settings_requested(bp);
7998
7999 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8000 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8001 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8002 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8003 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8004 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8005 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8006 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8007 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8008 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8009}
8010
8011static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8012{
8013 int func = BP_FUNC(bp);
8014 u32 val, val2;
8015 int rc = 0;
a2fbb9ea 8016
34f80b04 8017 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8018
34f80b04
EG
8019 bp->e1hov = 0;
8020 bp->e1hmf = 0;
8021 if (CHIP_IS_E1H(bp)) {
8022 bp->mf_config =
8023 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8024
3196a88a
EG
8025 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8026 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 8027 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 8028
34f80b04
EG
8029 bp->e1hov = val;
8030 bp->e1hmf = 1;
8031 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8032 "(0x%04x)\n",
8033 func, bp->e1hov, bp->e1hov);
8034 } else {
8035 BNX2X_DEV_INFO("Single function mode\n");
8036 if (BP_E1HVN(bp)) {
8037 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8038 " aborting\n", func);
8039 rc = -EPERM;
8040 }
8041 }
8042 }
a2fbb9ea 8043
34f80b04
EG
8044 if (!BP_NOMCP(bp)) {
8045 bnx2x_get_port_hwinfo(bp);
8046
8047 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8048 DRV_MSG_SEQ_NUMBER_MASK);
8049 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8050 }
8051
8052 if (IS_E1HMF(bp)) {
8053 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8054 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8055 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8056 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8057 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8058 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8059 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8060 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8061 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8062 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8063 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8064 ETH_ALEN);
8065 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8066 ETH_ALEN);
a2fbb9ea 8067 }
34f80b04
EG
8068
8069 return rc;
a2fbb9ea
ET
8070 }
8071
34f80b04
EG
8072 if (BP_NOMCP(bp)) {
8073 /* only supposed to happen on emulation/FPGA */
33471629 8074 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8075 random_ether_addr(bp->dev->dev_addr);
8076 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8077 }
a2fbb9ea 8078
34f80b04
EG
8079 return rc;
8080}
8081
8082static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8083{
8084 int func = BP_FUNC(bp);
87942b46 8085 int timer_interval;
34f80b04
EG
8086 int rc;
8087
da5a662a
VZ
8088 /* Disable interrupt handling until HW is initialized */
8089 atomic_set(&bp->intr_sem, 1);
8090
34f80b04 8091 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8092
1cf167f2 8093 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8094 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8095
8096 rc = bnx2x_get_hwinfo(bp);
8097
8098 /* need to reset chip if undi was active */
8099 if (!BP_NOMCP(bp))
8100 bnx2x_undi_unload(bp);
8101
8102 if (CHIP_REV_IS_FPGA(bp))
8103 printk(KERN_ERR PFX "FPGA detected\n");
8104
8105 if (BP_NOMCP(bp) && (func == 0))
8106 printk(KERN_ERR PFX
8107 "MCP disabled, must load devices in order!\n");
8108
555f6c78 8109 /* Set multi queue mode */
8badd27a
EG
8110 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8111 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8112 printk(KERN_ERR PFX
8badd27a 8113 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8114 multi_mode = ETH_RSS_MODE_DISABLED;
8115 }
8116 bp->multi_mode = multi_mode;
8117
8118
7a9b2557
VZ
8119 /* Set TPA flags */
8120 if (disable_tpa) {
8121 bp->flags &= ~TPA_ENABLE_FLAG;
8122 bp->dev->features &= ~NETIF_F_LRO;
8123 } else {
8124 bp->flags |= TPA_ENABLE_FLAG;
8125 bp->dev->features |= NETIF_F_LRO;
8126 }
8127
8128
34f80b04
EG
8129 bp->tx_ring_size = MAX_TX_AVAIL;
8130 bp->rx_ring_size = MAX_RX_AVAIL;
8131
8132 bp->rx_csum = 1;
34f80b04
EG
8133
8134 bp->tx_ticks = 50;
8135 bp->rx_ticks = 25;
8136
87942b46
EG
8137 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8138 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8139
8140 init_timer(&bp->timer);
8141 bp->timer.expires = jiffies + bp->current_interval;
8142 bp->timer.data = (unsigned long) bp;
8143 bp->timer.function = bnx2x_timer;
8144
8145 return rc;
a2fbb9ea
ET
8146}
8147
8148/*
8149 * ethtool service functions
8150 */
8151
8152/* All ethtool functions called with rtnl_lock */
8153
8154static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8155{
8156 struct bnx2x *bp = netdev_priv(dev);
8157
34f80b04
EG
8158 cmd->supported = bp->port.supported;
8159 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8160
8161 if (netif_carrier_ok(dev)) {
c18487ee
YR
8162 cmd->speed = bp->link_vars.line_speed;
8163 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8164 } else {
c18487ee
YR
8165 cmd->speed = bp->link_params.req_line_speed;
8166 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8167 }
34f80b04
EG
8168 if (IS_E1HMF(bp)) {
8169 u16 vn_max_rate;
8170
8171 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8172 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8173 if (vn_max_rate < cmd->speed)
8174 cmd->speed = vn_max_rate;
8175 }
a2fbb9ea 8176
c18487ee
YR
8177 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8178 u32 ext_phy_type =
8179 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8180
8181 switch (ext_phy_type) {
8182 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8183 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8184 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8185 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8186 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8187 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
f1410647
ET
8188 cmd->port = PORT_FIBRE;
8189 break;
8190
8191 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8192 cmd->port = PORT_TP;
8193 break;
8194
c18487ee
YR
8195 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8196 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8197 bp->link_params.ext_phy_config);
8198 break;
8199
f1410647
ET
8200 default:
8201 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8202 bp->link_params.ext_phy_config);
8203 break;
f1410647
ET
8204 }
8205 } else
a2fbb9ea 8206 cmd->port = PORT_TP;
a2fbb9ea 8207
34f80b04 8208 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8209 cmd->transceiver = XCVR_INTERNAL;
8210
c18487ee 8211 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8212 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8213 else
a2fbb9ea 8214 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8215
8216 cmd->maxtxpkt = 0;
8217 cmd->maxrxpkt = 0;
8218
8219 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8220 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8221 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8222 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8223 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8224 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8225 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8226
8227 return 0;
8228}
8229
8230static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8231{
8232 struct bnx2x *bp = netdev_priv(dev);
8233 u32 advertising;
8234
34f80b04
EG
8235 if (IS_E1HMF(bp))
8236 return 0;
8237
a2fbb9ea
ET
8238 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8239 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8240 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8241 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8242 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8243 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8244 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8245
a2fbb9ea 8246 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8247 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8248 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8249 return -EINVAL;
f1410647 8250 }
a2fbb9ea
ET
8251
8252 /* advertise the requested speed and duplex if supported */
34f80b04 8253 cmd->advertising &= bp->port.supported;
a2fbb9ea 8254
c18487ee
YR
8255 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8256 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8257 bp->port.advertising |= (ADVERTISED_Autoneg |
8258 cmd->advertising);
a2fbb9ea
ET
8259
8260 } else { /* forced speed */
8261 /* advertise the requested speed and duplex if supported */
8262 switch (cmd->speed) {
8263 case SPEED_10:
8264 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8265 if (!(bp->port.supported &
f1410647
ET
8266 SUPPORTED_10baseT_Full)) {
8267 DP(NETIF_MSG_LINK,
8268 "10M full not supported\n");
a2fbb9ea 8269 return -EINVAL;
f1410647 8270 }
a2fbb9ea
ET
8271
8272 advertising = (ADVERTISED_10baseT_Full |
8273 ADVERTISED_TP);
8274 } else {
34f80b04 8275 if (!(bp->port.supported &
f1410647
ET
8276 SUPPORTED_10baseT_Half)) {
8277 DP(NETIF_MSG_LINK,
8278 "10M half not supported\n");
a2fbb9ea 8279 return -EINVAL;
f1410647 8280 }
a2fbb9ea
ET
8281
8282 advertising = (ADVERTISED_10baseT_Half |
8283 ADVERTISED_TP);
8284 }
8285 break;
8286
8287 case SPEED_100:
8288 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8289 if (!(bp->port.supported &
f1410647
ET
8290 SUPPORTED_100baseT_Full)) {
8291 DP(NETIF_MSG_LINK,
8292 "100M full not supported\n");
a2fbb9ea 8293 return -EINVAL;
f1410647 8294 }
a2fbb9ea
ET
8295
8296 advertising = (ADVERTISED_100baseT_Full |
8297 ADVERTISED_TP);
8298 } else {
34f80b04 8299 if (!(bp->port.supported &
f1410647
ET
8300 SUPPORTED_100baseT_Half)) {
8301 DP(NETIF_MSG_LINK,
8302 "100M half not supported\n");
a2fbb9ea 8303 return -EINVAL;
f1410647 8304 }
a2fbb9ea
ET
8305
8306 advertising = (ADVERTISED_100baseT_Half |
8307 ADVERTISED_TP);
8308 }
8309 break;
8310
8311 case SPEED_1000:
f1410647
ET
8312 if (cmd->duplex != DUPLEX_FULL) {
8313 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8314 return -EINVAL;
f1410647 8315 }
a2fbb9ea 8316
34f80b04 8317 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8318 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8319 return -EINVAL;
f1410647 8320 }
a2fbb9ea
ET
8321
8322 advertising = (ADVERTISED_1000baseT_Full |
8323 ADVERTISED_TP);
8324 break;
8325
8326 case SPEED_2500:
f1410647
ET
8327 if (cmd->duplex != DUPLEX_FULL) {
8328 DP(NETIF_MSG_LINK,
8329 "2.5G half not supported\n");
a2fbb9ea 8330 return -EINVAL;
f1410647 8331 }
a2fbb9ea 8332
34f80b04 8333 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8334 DP(NETIF_MSG_LINK,
8335 "2.5G full not supported\n");
a2fbb9ea 8336 return -EINVAL;
f1410647 8337 }
a2fbb9ea 8338
f1410647 8339 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8340 ADVERTISED_TP);
8341 break;
8342
8343 case SPEED_10000:
f1410647
ET
8344 if (cmd->duplex != DUPLEX_FULL) {
8345 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8346 return -EINVAL;
f1410647 8347 }
a2fbb9ea 8348
34f80b04 8349 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8350 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8351 return -EINVAL;
f1410647 8352 }
a2fbb9ea
ET
8353
8354 advertising = (ADVERTISED_10000baseT_Full |
8355 ADVERTISED_FIBRE);
8356 break;
8357
8358 default:
f1410647 8359 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8360 return -EINVAL;
8361 }
8362
c18487ee
YR
8363 bp->link_params.req_line_speed = cmd->speed;
8364 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8365 bp->port.advertising = advertising;
a2fbb9ea
ET
8366 }
8367
c18487ee 8368 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8369 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8370 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8371 bp->port.advertising);
a2fbb9ea 8372
34f80b04 8373 if (netif_running(dev)) {
bb2a0f7a 8374 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8375 bnx2x_link_set(bp);
8376 }
a2fbb9ea
ET
8377
8378 return 0;
8379}
8380
c18487ee
YR
8381#define PHY_FW_VER_LEN 10
8382
a2fbb9ea
ET
8383static void bnx2x_get_drvinfo(struct net_device *dev,
8384 struct ethtool_drvinfo *info)
8385{
8386 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8387 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8388
8389 strcpy(info->driver, DRV_MODULE_NAME);
8390 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8391
8392 phy_fw_ver[0] = '\0';
34f80b04 8393 if (bp->port.pmf) {
4a37fb66 8394 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8395 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8396 (bp->state != BNX2X_STATE_CLOSED),
8397 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8398 bnx2x_release_phy_lock(bp);
34f80b04 8399 }
c18487ee 8400
f0e53a84
EG
8401 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8402 (bp->common.bc_ver & 0xff0000) >> 16,
8403 (bp->common.bc_ver & 0xff00) >> 8,
8404 (bp->common.bc_ver & 0xff),
8405 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8406 strcpy(info->bus_info, pci_name(bp->pdev));
8407 info->n_stats = BNX2X_NUM_STATS;
8408 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8409 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8410 info->regdump_len = 0;
8411}
8412
8413static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8414{
8415 struct bnx2x *bp = netdev_priv(dev);
8416
8417 if (bp->flags & NO_WOL_FLAG) {
8418 wol->supported = 0;
8419 wol->wolopts = 0;
8420 } else {
8421 wol->supported = WAKE_MAGIC;
8422 if (bp->wol)
8423 wol->wolopts = WAKE_MAGIC;
8424 else
8425 wol->wolopts = 0;
8426 }
8427 memset(&wol->sopass, 0, sizeof(wol->sopass));
8428}
8429
8430static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8431{
8432 struct bnx2x *bp = netdev_priv(dev);
8433
8434 if (wol->wolopts & ~WAKE_MAGIC)
8435 return -EINVAL;
8436
8437 if (wol->wolopts & WAKE_MAGIC) {
8438 if (bp->flags & NO_WOL_FLAG)
8439 return -EINVAL;
8440
8441 bp->wol = 1;
34f80b04 8442 } else
a2fbb9ea 8443 bp->wol = 0;
34f80b04 8444
a2fbb9ea
ET
8445 return 0;
8446}
8447
8448static u32 bnx2x_get_msglevel(struct net_device *dev)
8449{
8450 struct bnx2x *bp = netdev_priv(dev);
8451
8452 return bp->msglevel;
8453}
8454
8455static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8456{
8457 struct bnx2x *bp = netdev_priv(dev);
8458
8459 if (capable(CAP_NET_ADMIN))
8460 bp->msglevel = level;
8461}
8462
8463static int bnx2x_nway_reset(struct net_device *dev)
8464{
8465 struct bnx2x *bp = netdev_priv(dev);
8466
34f80b04
EG
8467 if (!bp->port.pmf)
8468 return 0;
a2fbb9ea 8469
34f80b04 8470 if (netif_running(dev)) {
bb2a0f7a 8471 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8472 bnx2x_link_set(bp);
8473 }
a2fbb9ea
ET
8474
8475 return 0;
8476}
8477
8478static int bnx2x_get_eeprom_len(struct net_device *dev)
8479{
8480 struct bnx2x *bp = netdev_priv(dev);
8481
34f80b04 8482 return bp->common.flash_size;
a2fbb9ea
ET
8483}
8484
8485static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8486{
34f80b04 8487 int port = BP_PORT(bp);
a2fbb9ea
ET
8488 int count, i;
8489 u32 val = 0;
8490
8491 /* adjust timeout for emulation/FPGA */
8492 count = NVRAM_TIMEOUT_COUNT;
8493 if (CHIP_REV_IS_SLOW(bp))
8494 count *= 100;
8495
8496 /* request access to nvram interface */
8497 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8498 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8499
8500 for (i = 0; i < count*10; i++) {
8501 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8502 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8503 break;
8504
8505 udelay(5);
8506 }
8507
8508 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8509 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8510 return -EBUSY;
8511 }
8512
8513 return 0;
8514}
8515
8516static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8517{
34f80b04 8518 int port = BP_PORT(bp);
a2fbb9ea
ET
8519 int count, i;
8520 u32 val = 0;
8521
8522 /* adjust timeout for emulation/FPGA */
8523 count = NVRAM_TIMEOUT_COUNT;
8524 if (CHIP_REV_IS_SLOW(bp))
8525 count *= 100;
8526
8527 /* relinquish nvram interface */
8528 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8529 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8530
8531 for (i = 0; i < count*10; i++) {
8532 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8533 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8534 break;
8535
8536 udelay(5);
8537 }
8538
8539 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8540 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8541 return -EBUSY;
8542 }
8543
8544 return 0;
8545}
8546
8547static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8548{
8549 u32 val;
8550
8551 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8552
8553 /* enable both bits, even on read */
8554 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8555 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8556 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8557}
8558
8559static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8560{
8561 u32 val;
8562
8563 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8564
8565 /* disable both bits, even after read */
8566 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8567 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8568 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8569}
8570
8571static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8572 u32 cmd_flags)
8573{
f1410647 8574 int count, i, rc;
a2fbb9ea
ET
8575 u32 val;
8576
8577 /* build the command word */
8578 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8579
8580 /* need to clear DONE bit separately */
8581 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8582
8583 /* address of the NVRAM to read from */
8584 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8585 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8586
8587 /* issue a read command */
8588 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8589
8590 /* adjust timeout for emulation/FPGA */
8591 count = NVRAM_TIMEOUT_COUNT;
8592 if (CHIP_REV_IS_SLOW(bp))
8593 count *= 100;
8594
8595 /* wait for completion */
8596 *ret_val = 0;
8597 rc = -EBUSY;
8598 for (i = 0; i < count; i++) {
8599 udelay(5);
8600 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8601
8602 if (val & MCPR_NVM_COMMAND_DONE) {
8603 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8604 /* we read nvram data in cpu order
8605 * but ethtool sees it as an array of bytes
8606 * converting to big-endian will do the work */
8607 val = cpu_to_be32(val);
8608 *ret_val = val;
8609 rc = 0;
8610 break;
8611 }
8612 }
8613
8614 return rc;
8615}
8616
8617static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8618 int buf_size)
8619{
8620 int rc;
8621 u32 cmd_flags;
8622 u32 val;
8623
8624 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8625 DP(BNX2X_MSG_NVM,
c14423fe 8626 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8627 offset, buf_size);
8628 return -EINVAL;
8629 }
8630
34f80b04
EG
8631 if (offset + buf_size > bp->common.flash_size) {
8632 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8633 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8634 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8635 return -EINVAL;
8636 }
8637
8638 /* request access to nvram interface */
8639 rc = bnx2x_acquire_nvram_lock(bp);
8640 if (rc)
8641 return rc;
8642
8643 /* enable access to nvram interface */
8644 bnx2x_enable_nvram_access(bp);
8645
8646 /* read the first word(s) */
8647 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8648 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8649 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8650 memcpy(ret_buf, &val, 4);
8651
8652 /* advance to the next dword */
8653 offset += sizeof(u32);
8654 ret_buf += sizeof(u32);
8655 buf_size -= sizeof(u32);
8656 cmd_flags = 0;
8657 }
8658
8659 if (rc == 0) {
8660 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8661 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8662 memcpy(ret_buf, &val, 4);
8663 }
8664
8665 /* disable access to nvram interface */
8666 bnx2x_disable_nvram_access(bp);
8667 bnx2x_release_nvram_lock(bp);
8668
8669 return rc;
8670}
8671
8672static int bnx2x_get_eeprom(struct net_device *dev,
8673 struct ethtool_eeprom *eeprom, u8 *eebuf)
8674{
8675 struct bnx2x *bp = netdev_priv(dev);
8676 int rc;
8677
2add3acb
EG
8678 if (!netif_running(dev))
8679 return -EAGAIN;
8680
34f80b04 8681 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8682 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8683 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8684 eeprom->len, eeprom->len);
8685
8686 /* parameters already validated in ethtool_get_eeprom */
8687
8688 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8689
8690 return rc;
8691}
8692
8693static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8694 u32 cmd_flags)
8695{
f1410647 8696 int count, i, rc;
a2fbb9ea
ET
8697
8698 /* build the command word */
8699 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8700
8701 /* need to clear DONE bit separately */
8702 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8703
8704 /* write the data */
8705 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8706
8707 /* address of the NVRAM to write to */
8708 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8709 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8710
8711 /* issue the write command */
8712 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8713
8714 /* adjust timeout for emulation/FPGA */
8715 count = NVRAM_TIMEOUT_COUNT;
8716 if (CHIP_REV_IS_SLOW(bp))
8717 count *= 100;
8718
8719 /* wait for completion */
8720 rc = -EBUSY;
8721 for (i = 0; i < count; i++) {
8722 udelay(5);
8723 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8724 if (val & MCPR_NVM_COMMAND_DONE) {
8725 rc = 0;
8726 break;
8727 }
8728 }
8729
8730 return rc;
8731}
8732
f1410647 8733#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8734
8735static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8736 int buf_size)
8737{
8738 int rc;
8739 u32 cmd_flags;
8740 u32 align_offset;
8741 u32 val;
8742
34f80b04
EG
8743 if (offset + buf_size > bp->common.flash_size) {
8744 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8745 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8746 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8747 return -EINVAL;
8748 }
8749
8750 /* request access to nvram interface */
8751 rc = bnx2x_acquire_nvram_lock(bp);
8752 if (rc)
8753 return rc;
8754
8755 /* enable access to nvram interface */
8756 bnx2x_enable_nvram_access(bp);
8757
8758 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8759 align_offset = (offset & ~0x03);
8760 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8761
8762 if (rc == 0) {
8763 val &= ~(0xff << BYTE_OFFSET(offset));
8764 val |= (*data_buf << BYTE_OFFSET(offset));
8765
8766 /* nvram data is returned as an array of bytes
8767 * convert it back to cpu order */
8768 val = be32_to_cpu(val);
8769
a2fbb9ea
ET
8770 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8771 cmd_flags);
8772 }
8773
8774 /* disable access to nvram interface */
8775 bnx2x_disable_nvram_access(bp);
8776 bnx2x_release_nvram_lock(bp);
8777
8778 return rc;
8779}
8780
8781static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8782 int buf_size)
8783{
8784 int rc;
8785 u32 cmd_flags;
8786 u32 val;
8787 u32 written_so_far;
8788
34f80b04 8789 if (buf_size == 1) /* ethtool */
a2fbb9ea 8790 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8791
8792 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8793 DP(BNX2X_MSG_NVM,
c14423fe 8794 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8795 offset, buf_size);
8796 return -EINVAL;
8797 }
8798
34f80b04
EG
8799 if (offset + buf_size > bp->common.flash_size) {
8800 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8801 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8802 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8803 return -EINVAL;
8804 }
8805
8806 /* request access to nvram interface */
8807 rc = bnx2x_acquire_nvram_lock(bp);
8808 if (rc)
8809 return rc;
8810
8811 /* enable access to nvram interface */
8812 bnx2x_enable_nvram_access(bp);
8813
8814 written_so_far = 0;
8815 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8816 while ((written_so_far < buf_size) && (rc == 0)) {
8817 if (written_so_far == (buf_size - sizeof(u32)))
8818 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8819 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8820 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8821 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8822 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8823
8824 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8825
8826 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8827
8828 /* advance to the next dword */
8829 offset += sizeof(u32);
8830 data_buf += sizeof(u32);
8831 written_so_far += sizeof(u32);
8832 cmd_flags = 0;
8833 }
8834
8835 /* disable access to nvram interface */
8836 bnx2x_disable_nvram_access(bp);
8837 bnx2x_release_nvram_lock(bp);
8838
8839 return rc;
8840}
8841
8842static int bnx2x_set_eeprom(struct net_device *dev,
8843 struct ethtool_eeprom *eeprom, u8 *eebuf)
8844{
8845 struct bnx2x *bp = netdev_priv(dev);
8846 int rc;
8847
9f4c9583
EG
8848 if (!netif_running(dev))
8849 return -EAGAIN;
8850
34f80b04 8851 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8852 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8853 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8854 eeprom->len, eeprom->len);
8855
8856 /* parameters already validated in ethtool_set_eeprom */
8857
c18487ee 8858 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8859 if (eeprom->magic == 0x00504859)
8860 if (bp->port.pmf) {
8861
4a37fb66 8862 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8863 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8864 bp->link_params.ext_phy_config,
8865 (bp->state != BNX2X_STATE_CLOSED),
8866 eebuf, eeprom->len);
bb2a0f7a
YG
8867 if ((bp->state == BNX2X_STATE_OPEN) ||
8868 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 8869 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 8870 &bp->link_vars, 1);
34f80b04
EG
8871 rc |= bnx2x_phy_init(&bp->link_params,
8872 &bp->link_vars);
bb2a0f7a 8873 }
4a37fb66 8874 bnx2x_release_phy_lock(bp);
34f80b04
EG
8875
8876 } else /* Only the PMF can access the PHY */
8877 return -EINVAL;
8878 else
c18487ee 8879 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8880
8881 return rc;
8882}
8883
8884static int bnx2x_get_coalesce(struct net_device *dev,
8885 struct ethtool_coalesce *coal)
8886{
8887 struct bnx2x *bp = netdev_priv(dev);
8888
8889 memset(coal, 0, sizeof(struct ethtool_coalesce));
8890
8891 coal->rx_coalesce_usecs = bp->rx_ticks;
8892 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8893
8894 return 0;
8895}
8896
8897static int bnx2x_set_coalesce(struct net_device *dev,
8898 struct ethtool_coalesce *coal)
8899{
8900 struct bnx2x *bp = netdev_priv(dev);
8901
8902 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8903 if (bp->rx_ticks > 3000)
8904 bp->rx_ticks = 3000;
8905
8906 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8907 if (bp->tx_ticks > 0x3000)
8908 bp->tx_ticks = 0x3000;
8909
34f80b04 8910 if (netif_running(dev))
a2fbb9ea
ET
8911 bnx2x_update_coalesce(bp);
8912
8913 return 0;
8914}
8915
8916static void bnx2x_get_ringparam(struct net_device *dev,
8917 struct ethtool_ringparam *ering)
8918{
8919 struct bnx2x *bp = netdev_priv(dev);
8920
8921 ering->rx_max_pending = MAX_RX_AVAIL;
8922 ering->rx_mini_max_pending = 0;
8923 ering->rx_jumbo_max_pending = 0;
8924
8925 ering->rx_pending = bp->rx_ring_size;
8926 ering->rx_mini_pending = 0;
8927 ering->rx_jumbo_pending = 0;
8928
8929 ering->tx_max_pending = MAX_TX_AVAIL;
8930 ering->tx_pending = bp->tx_ring_size;
8931}
8932
8933static int bnx2x_set_ringparam(struct net_device *dev,
8934 struct ethtool_ringparam *ering)
8935{
8936 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8937 int rc = 0;
a2fbb9ea
ET
8938
8939 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8940 (ering->tx_pending > MAX_TX_AVAIL) ||
8941 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8942 return -EINVAL;
8943
8944 bp->rx_ring_size = ering->rx_pending;
8945 bp->tx_ring_size = ering->tx_pending;
8946
34f80b04
EG
8947 if (netif_running(dev)) {
8948 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8949 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8950 }
8951
34f80b04 8952 return rc;
a2fbb9ea
ET
8953}
8954
8955static void bnx2x_get_pauseparam(struct net_device *dev,
8956 struct ethtool_pauseparam *epause)
8957{
8958 struct bnx2x *bp = netdev_priv(dev);
8959
c0700f90 8960 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
8961 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8962
c0700f90
DM
8963 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8964 BNX2X_FLOW_CTRL_RX);
8965 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8966 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
8967
8968 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8969 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8970 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8971}
8972
8973static int bnx2x_set_pauseparam(struct net_device *dev,
8974 struct ethtool_pauseparam *epause)
8975{
8976 struct bnx2x *bp = netdev_priv(dev);
8977
34f80b04
EG
8978 if (IS_E1HMF(bp))
8979 return 0;
8980
a2fbb9ea
ET
8981 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8982 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8983 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8984
c0700f90 8985 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8986
f1410647 8987 if (epause->rx_pause)
c0700f90 8988 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8989
f1410647 8990 if (epause->tx_pause)
c0700f90 8991 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8992
c0700f90
DM
8993 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8994 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8995
c18487ee 8996 if (epause->autoneg) {
34f80b04 8997 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8998 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8999 return -EINVAL;
9000 }
a2fbb9ea 9001
c18487ee 9002 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9003 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9004 }
a2fbb9ea 9005
c18487ee
YR
9006 DP(NETIF_MSG_LINK,
9007 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9008
9009 if (netif_running(dev)) {
bb2a0f7a 9010 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9011 bnx2x_link_set(bp);
9012 }
a2fbb9ea
ET
9013
9014 return 0;
9015}
9016
df0f2343
VZ
9017static int bnx2x_set_flags(struct net_device *dev, u32 data)
9018{
9019 struct bnx2x *bp = netdev_priv(dev);
9020 int changed = 0;
9021 int rc = 0;
9022
9023 /* TPA requires Rx CSUM offloading */
9024 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9025 if (!(dev->features & NETIF_F_LRO)) {
9026 dev->features |= NETIF_F_LRO;
9027 bp->flags |= TPA_ENABLE_FLAG;
9028 changed = 1;
9029 }
9030
9031 } else if (dev->features & NETIF_F_LRO) {
9032 dev->features &= ~NETIF_F_LRO;
9033 bp->flags &= ~TPA_ENABLE_FLAG;
9034 changed = 1;
9035 }
9036
9037 if (changed && netif_running(dev)) {
9038 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9039 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9040 }
9041
9042 return rc;
9043}
9044
a2fbb9ea
ET
9045static u32 bnx2x_get_rx_csum(struct net_device *dev)
9046{
9047 struct bnx2x *bp = netdev_priv(dev);
9048
9049 return bp->rx_csum;
9050}
9051
9052static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9053{
9054 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9055 int rc = 0;
a2fbb9ea
ET
9056
9057 bp->rx_csum = data;
df0f2343
VZ
9058
9059 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9060 TPA'ed packets will be discarded due to wrong TCP CSUM */
9061 if (!data) {
9062 u32 flags = ethtool_op_get_flags(dev);
9063
9064 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9065 }
9066
9067 return rc;
a2fbb9ea
ET
9068}
9069
9070static int bnx2x_set_tso(struct net_device *dev, u32 data)
9071{
755735eb 9072 if (data) {
a2fbb9ea 9073 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9074 dev->features |= NETIF_F_TSO6;
9075 } else {
a2fbb9ea 9076 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9077 dev->features &= ~NETIF_F_TSO6;
9078 }
9079
a2fbb9ea
ET
9080 return 0;
9081}
9082
f3c87cdd 9083static const struct {
a2fbb9ea
ET
9084 char string[ETH_GSTRING_LEN];
9085} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9086 { "register_test (offline)" },
9087 { "memory_test (offline)" },
9088 { "loopback_test (offline)" },
9089 { "nvram_test (online)" },
9090 { "interrupt_test (online)" },
9091 { "link_test (online)" },
d3d4f495 9092 { "idle check (online)" }
a2fbb9ea
ET
9093};
9094
9095static int bnx2x_self_test_count(struct net_device *dev)
9096{
9097 return BNX2X_NUM_TESTS;
9098}
9099
f3c87cdd
YG
9100static int bnx2x_test_registers(struct bnx2x *bp)
9101{
9102 int idx, i, rc = -ENODEV;
9103 u32 wr_val = 0;
9dabc424 9104 int port = BP_PORT(bp);
f3c87cdd
YG
9105 static const struct {
9106 u32 offset0;
9107 u32 offset1;
9108 u32 mask;
9109 } reg_tbl[] = {
9110/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9111 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9112 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9113 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9114 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9115 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9116 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9117 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9118 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9119 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9120/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9121 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9122 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9123 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9124 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9125 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9126 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9127 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9128 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9129 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9130/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9131 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9132 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9133 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9134 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9135 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9136 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9137 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9138 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9139 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9140/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9141 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9142 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9143 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9144 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9145 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9146 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9147 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9148
9149 { 0xffffffff, 0, 0x00000000 }
9150 };
9151
9152 if (!netif_running(bp->dev))
9153 return rc;
9154
9155 /* Repeat the test twice:
9156 First by writing 0x00000000, second by writing 0xffffffff */
9157 for (idx = 0; idx < 2; idx++) {
9158
9159 switch (idx) {
9160 case 0:
9161 wr_val = 0;
9162 break;
9163 case 1:
9164 wr_val = 0xffffffff;
9165 break;
9166 }
9167
9168 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9169 u32 offset, mask, save_val, val;
f3c87cdd
YG
9170
9171 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9172 mask = reg_tbl[i].mask;
9173
9174 save_val = REG_RD(bp, offset);
9175
9176 REG_WR(bp, offset, wr_val);
9177 val = REG_RD(bp, offset);
9178
9179 /* Restore the original register's value */
9180 REG_WR(bp, offset, save_val);
9181
9182 /* verify that value is as expected value */
9183 if ((val & mask) != (wr_val & mask))
9184 goto test_reg_exit;
9185 }
9186 }
9187
9188 rc = 0;
9189
9190test_reg_exit:
9191 return rc;
9192}
9193
9194static int bnx2x_test_memory(struct bnx2x *bp)
9195{
9196 int i, j, rc = -ENODEV;
9197 u32 val;
9198 static const struct {
9199 u32 offset;
9200 int size;
9201 } mem_tbl[] = {
9202 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9203 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9204 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9205 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9206 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9207 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9208 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9209
9210 { 0xffffffff, 0 }
9211 };
9212 static const struct {
9213 char *name;
9214 u32 offset;
9dabc424
YG
9215 u32 e1_mask;
9216 u32 e1h_mask;
f3c87cdd 9217 } prty_tbl[] = {
9dabc424
YG
9218 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9219 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9220 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9221 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9222 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9223 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9224
9225 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9226 };
9227
9228 if (!netif_running(bp->dev))
9229 return rc;
9230
9231 /* Go through all the memories */
9232 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9233 for (j = 0; j < mem_tbl[i].size; j++)
9234 REG_RD(bp, mem_tbl[i].offset + j*4);
9235
9236 /* Check the parity status */
9237 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9238 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9239 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9240 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9241 DP(NETIF_MSG_HW,
9242 "%s is 0x%x\n", prty_tbl[i].name, val);
9243 goto test_mem_exit;
9244 }
9245 }
9246
9247 rc = 0;
9248
9249test_mem_exit:
9250 return rc;
9251}
9252
f3c87cdd
YG
9253static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9254{
9255 int cnt = 1000;
9256
9257 if (link_up)
9258 while (bnx2x_link_test(bp) && cnt--)
9259 msleep(10);
9260}
9261
9262static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9263{
9264 unsigned int pkt_size, num_pkts, i;
9265 struct sk_buff *skb;
9266 unsigned char *packet;
9267 struct bnx2x_fastpath *fp = &bp->fp[0];
9268 u16 tx_start_idx, tx_idx;
9269 u16 rx_start_idx, rx_idx;
9270 u16 pkt_prod;
9271 struct sw_tx_bd *tx_buf;
9272 struct eth_tx_bd *tx_bd;
9273 dma_addr_t mapping;
9274 union eth_rx_cqe *cqe;
9275 u8 cqe_fp_flags;
9276 struct sw_rx_bd *rx_buf;
9277 u16 len;
9278 int rc = -ENODEV;
9279
9280 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9281 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9282 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
f3c87cdd
YG
9283
9284 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
3910c8ae 9285 u16 cnt = 1000;
f3c87cdd 9286 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
f3c87cdd 9287 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
f3c87cdd 9288 /* wait until link state is restored */
3910c8ae
EG
9289 if (link_up)
9290 while (cnt-- && bnx2x_test_link(&bp->link_params,
9291 &bp->link_vars))
9292 msleep(10);
f3c87cdd
YG
9293 } else
9294 return -EINVAL;
9295
9296 pkt_size = 1514;
9297 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9298 if (!skb) {
9299 rc = -ENOMEM;
9300 goto test_loopback_exit;
9301 }
9302 packet = skb_put(skb, pkt_size);
9303 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9304 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9305 for (i = ETH_HLEN; i < pkt_size; i++)
9306 packet[i] = (unsigned char) (i & 0xff);
9307
9308 num_pkts = 0;
9309 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9310 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9311
9312 pkt_prod = fp->tx_pkt_prod++;
9313 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9314 tx_buf->first_bd = fp->tx_bd_prod;
9315 tx_buf->skb = skb;
9316
9317 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9318 mapping = pci_map_single(bp->pdev, skb->data,
9319 skb_headlen(skb), PCI_DMA_TODEVICE);
9320 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9321 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9322 tx_bd->nbd = cpu_to_le16(1);
9323 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9324 tx_bd->vlan = cpu_to_le16(pkt_prod);
9325 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9326 ETH_TX_BD_FLAGS_END_BD);
9327 tx_bd->general_data = ((UNICAST_ADDRESS <<
9328 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9329
58f4c4cf
EG
9330 wmb();
9331
f3c87cdd
YG
9332 fp->hw_tx_prods->bds_prod =
9333 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9334 mb(); /* FW restriction: must not reorder writing nbd and packets */
9335 fp->hw_tx_prods->packets_prod =
9336 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9337 DOORBELL(bp, FP_IDX(fp), 0);
9338
9339 mmiowb();
9340
9341 num_pkts++;
9342 fp->tx_bd_prod++;
9343 bp->dev->trans_start = jiffies;
9344
9345 udelay(100);
9346
9347 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9348 if (tx_idx != tx_start_idx + num_pkts)
9349 goto test_loopback_exit;
9350
9351 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9352 if (rx_idx != rx_start_idx + num_pkts)
9353 goto test_loopback_exit;
9354
9355 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9356 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9357 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9358 goto test_loopback_rx_exit;
9359
9360 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9361 if (len != pkt_size)
9362 goto test_loopback_rx_exit;
9363
9364 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9365 skb = rx_buf->skb;
9366 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9367 for (i = ETH_HLEN; i < pkt_size; i++)
9368 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9369 goto test_loopback_rx_exit;
9370
9371 rc = 0;
9372
9373test_loopback_rx_exit:
f3c87cdd
YG
9374
9375 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9376 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9377 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9378 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9379
9380 /* Update producers */
9381 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9382 fp->rx_sge_prod);
f3c87cdd
YG
9383
9384test_loopback_exit:
9385 bp->link_params.loopback_mode = LOOPBACK_NONE;
9386
9387 return rc;
9388}
9389
9390static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9391{
9392 int rc = 0;
9393
9394 if (!netif_running(bp->dev))
9395 return BNX2X_LOOPBACK_FAILED;
9396
f8ef6e44 9397 bnx2x_netif_stop(bp, 1);
3910c8ae 9398 bnx2x_acquire_phy_lock(bp);
f3c87cdd
YG
9399
9400 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9401 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9402 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9403 }
9404
9405 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9406 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9407 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9408 }
9409
3910c8ae 9410 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9411 bnx2x_netif_start(bp);
9412
9413 return rc;
9414}
9415
9416#define CRC32_RESIDUAL 0xdebb20e3
9417
9418static int bnx2x_test_nvram(struct bnx2x *bp)
9419{
9420 static const struct {
9421 int offset;
9422 int size;
9423 } nvram_tbl[] = {
9424 { 0, 0x14 }, /* bootstrap */
9425 { 0x14, 0xec }, /* dir */
9426 { 0x100, 0x350 }, /* manuf_info */
9427 { 0x450, 0xf0 }, /* feature_info */
9428 { 0x640, 0x64 }, /* upgrade_key_info */
9429 { 0x6a4, 0x64 },
9430 { 0x708, 0x70 }, /* manuf_key_info */
9431 { 0x778, 0x70 },
9432 { 0, 0 }
9433 };
9434 u32 buf[0x350 / 4];
9435 u8 *data = (u8 *)buf;
9436 int i, rc;
9437 u32 magic, csum;
9438
9439 rc = bnx2x_nvram_read(bp, 0, data, 4);
9440 if (rc) {
9441 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9442 goto test_nvram_exit;
9443 }
9444
9445 magic = be32_to_cpu(buf[0]);
9446 if (magic != 0x669955aa) {
9447 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9448 rc = -ENODEV;
9449 goto test_nvram_exit;
9450 }
9451
9452 for (i = 0; nvram_tbl[i].size; i++) {
9453
9454 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9455 nvram_tbl[i].size);
9456 if (rc) {
9457 DP(NETIF_MSG_PROBE,
9458 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9459 goto test_nvram_exit;
9460 }
9461
9462 csum = ether_crc_le(nvram_tbl[i].size, data);
9463 if (csum != CRC32_RESIDUAL) {
9464 DP(NETIF_MSG_PROBE,
9465 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9466 rc = -ENODEV;
9467 goto test_nvram_exit;
9468 }
9469 }
9470
9471test_nvram_exit:
9472 return rc;
9473}
9474
9475static int bnx2x_test_intr(struct bnx2x *bp)
9476{
9477 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9478 int i, rc;
9479
9480 if (!netif_running(bp->dev))
9481 return -ENODEV;
9482
8d9c5f34 9483 config->hdr.length = 0;
af246401
EG
9484 if (CHIP_IS_E1(bp))
9485 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9486 else
9487 config->hdr.offset = BP_FUNC(bp);
f3c87cdd
YG
9488 config->hdr.client_id = BP_CL_ID(bp);
9489 config->hdr.reserved1 = 0;
9490
9491 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9492 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9493 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9494 if (rc == 0) {
9495 bp->set_mac_pending++;
9496 for (i = 0; i < 10; i++) {
9497 if (!bp->set_mac_pending)
9498 break;
9499 msleep_interruptible(10);
9500 }
9501 if (i == 10)
9502 rc = -ENODEV;
9503 }
9504
9505 return rc;
9506}
9507
a2fbb9ea
ET
9508static void bnx2x_self_test(struct net_device *dev,
9509 struct ethtool_test *etest, u64 *buf)
9510{
9511 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
9512
9513 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9514
f3c87cdd 9515 if (!netif_running(dev))
a2fbb9ea 9516 return;
a2fbb9ea 9517
33471629 9518 /* offline tests are not supported in MF mode */
f3c87cdd
YG
9519 if (IS_E1HMF(bp))
9520 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9521
9522 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9523 u8 link_up;
9524
9525 link_up = bp->link_vars.link_up;
9526 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9527 bnx2x_nic_load(bp, LOAD_DIAG);
9528 /* wait until link state is restored */
9529 bnx2x_wait_for_link(bp, link_up);
9530
9531 if (bnx2x_test_registers(bp) != 0) {
9532 buf[0] = 1;
9533 etest->flags |= ETH_TEST_FL_FAILED;
9534 }
9535 if (bnx2x_test_memory(bp) != 0) {
9536 buf[1] = 1;
9537 etest->flags |= ETH_TEST_FL_FAILED;
9538 }
9539 buf[2] = bnx2x_test_loopback(bp, link_up);
9540 if (buf[2] != 0)
9541 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9542
f3c87cdd
YG
9543 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9544 bnx2x_nic_load(bp, LOAD_NORMAL);
9545 /* wait until link state is restored */
9546 bnx2x_wait_for_link(bp, link_up);
9547 }
9548 if (bnx2x_test_nvram(bp) != 0) {
9549 buf[3] = 1;
a2fbb9ea
ET
9550 etest->flags |= ETH_TEST_FL_FAILED;
9551 }
f3c87cdd
YG
9552 if (bnx2x_test_intr(bp) != 0) {
9553 buf[4] = 1;
9554 etest->flags |= ETH_TEST_FL_FAILED;
9555 }
9556 if (bp->port.pmf)
9557 if (bnx2x_link_test(bp) != 0) {
9558 buf[5] = 1;
9559 etest->flags |= ETH_TEST_FL_FAILED;
9560 }
f3c87cdd
YG
9561
9562#ifdef BNX2X_EXTRA_DEBUG
9563 bnx2x_panic_dump(bp);
9564#endif
a2fbb9ea
ET
9565}
9566
de832a55
EG
9567static const struct {
9568 long offset;
9569 int size;
9570 u8 string[ETH_GSTRING_LEN];
9571} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9572/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9573 { Q_STATS_OFFSET32(error_bytes_received_hi),
9574 8, "[%d]: rx_error_bytes" },
9575 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9576 8, "[%d]: rx_ucast_packets" },
9577 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9578 8, "[%d]: rx_mcast_packets" },
9579 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9580 8, "[%d]: rx_bcast_packets" },
9581 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9582 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9583 4, "[%d]: rx_phy_ip_err_discards"},
9584 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9585 4, "[%d]: rx_skb_alloc_discard" },
9586 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9587
9588/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9589 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9590 8, "[%d]: tx_packets" }
9591};
9592
bb2a0f7a
YG
9593static const struct {
9594 long offset;
9595 int size;
9596 u32 flags;
66e855f3
YG
9597#define STATS_FLAGS_PORT 1
9598#define STATS_FLAGS_FUNC 2
de832a55 9599#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9600 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9601} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
9602/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9603 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9604 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9605 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9606 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9607 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9608 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9609 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9610 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9611 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9612 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9613 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9614 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9615 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
9616 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9617 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9618 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9619 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9620/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9621 8, STATS_FLAGS_PORT, "rx_fragments" },
9622 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9623 8, STATS_FLAGS_PORT, "rx_jabbers" },
9624 { STATS_OFFSET32(no_buff_discard_hi),
9625 8, STATS_FLAGS_BOTH, "rx_discards" },
9626 { STATS_OFFSET32(mac_filter_discard),
9627 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9628 { STATS_OFFSET32(xxoverflow_discard),
9629 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9630 { STATS_OFFSET32(brb_drop_hi),
9631 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9632 { STATS_OFFSET32(brb_truncate_hi),
9633 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9634 { STATS_OFFSET32(pause_frames_received_hi),
9635 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9636 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9637 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9638 { STATS_OFFSET32(nig_timer_max),
9639 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9640/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9641 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9642 { STATS_OFFSET32(rx_skb_alloc_failed),
9643 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9644 { STATS_OFFSET32(hw_csum_err),
9645 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9646
9647 { STATS_OFFSET32(total_bytes_transmitted_hi),
9648 8, STATS_FLAGS_BOTH, "tx_bytes" },
9649 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9650 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9651 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9652 8, STATS_FLAGS_BOTH, "tx_packets" },
9653 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9654 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9655 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9656 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9657 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9658 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9659 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9660 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9661/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9662 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9663 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9664 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9665 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9666 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9667 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9668 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9669 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9670 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9671 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9672 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9673 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9674 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9675 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9676 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9677 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9678 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9679 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9680 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9681/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9682 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
9683 { STATS_OFFSET32(pause_frames_sent_hi),
9684 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
9685};
9686
de832a55
EG
9687#define IS_PORT_STAT(i) \
9688 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9689#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9690#define IS_E1HMF_MODE_STAT(bp) \
9691 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9692
a2fbb9ea
ET
9693static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9694{
bb2a0f7a 9695 struct bnx2x *bp = netdev_priv(dev);
de832a55 9696 int i, j, k;
bb2a0f7a 9697
a2fbb9ea
ET
9698 switch (stringset) {
9699 case ETH_SS_STATS:
de832a55
EG
9700 if (is_multi(bp)) {
9701 k = 0;
9702 for_each_queue(bp, i) {
9703 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9704 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9705 bnx2x_q_stats_arr[j].string, i);
9706 k += BNX2X_NUM_Q_STATS;
9707 }
9708 if (IS_E1HMF_MODE_STAT(bp))
9709 break;
9710 for (j = 0; j < BNX2X_NUM_STATS; j++)
9711 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9712 bnx2x_stats_arr[j].string);
9713 } else {
9714 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9715 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9716 continue;
9717 strcpy(buf + j*ETH_GSTRING_LEN,
9718 bnx2x_stats_arr[i].string);
9719 j++;
9720 }
bb2a0f7a 9721 }
a2fbb9ea
ET
9722 break;
9723
9724 case ETH_SS_TEST:
9725 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9726 break;
9727 }
9728}
9729
9730static int bnx2x_get_stats_count(struct net_device *dev)
9731{
bb2a0f7a 9732 struct bnx2x *bp = netdev_priv(dev);
de832a55 9733 int i, num_stats;
bb2a0f7a 9734
de832a55
EG
9735 if (is_multi(bp)) {
9736 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9737 if (!IS_E1HMF_MODE_STAT(bp))
9738 num_stats += BNX2X_NUM_STATS;
9739 } else {
9740 if (IS_E1HMF_MODE_STAT(bp)) {
9741 num_stats = 0;
9742 for (i = 0; i < BNX2X_NUM_STATS; i++)
9743 if (IS_FUNC_STAT(i))
9744 num_stats++;
9745 } else
9746 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9747 }
de832a55 9748
bb2a0f7a 9749 return num_stats;
a2fbb9ea
ET
9750}
9751
9752static void bnx2x_get_ethtool_stats(struct net_device *dev,
9753 struct ethtool_stats *stats, u64 *buf)
9754{
9755 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
9756 u32 *hw_stats, *offset;
9757 int i, j, k;
bb2a0f7a 9758
de832a55
EG
9759 if (is_multi(bp)) {
9760 k = 0;
9761 for_each_queue(bp, i) {
9762 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9763 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9764 if (bnx2x_q_stats_arr[j].size == 0) {
9765 /* skip this counter */
9766 buf[k + j] = 0;
9767 continue;
9768 }
9769 offset = (hw_stats +
9770 bnx2x_q_stats_arr[j].offset);
9771 if (bnx2x_q_stats_arr[j].size == 4) {
9772 /* 4-byte counter */
9773 buf[k + j] = (u64) *offset;
9774 continue;
9775 }
9776 /* 8-byte counter */
9777 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9778 }
9779 k += BNX2X_NUM_Q_STATS;
9780 }
9781 if (IS_E1HMF_MODE_STAT(bp))
9782 return;
9783 hw_stats = (u32 *)&bp->eth_stats;
9784 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9785 if (bnx2x_stats_arr[j].size == 0) {
9786 /* skip this counter */
9787 buf[k + j] = 0;
9788 continue;
9789 }
9790 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9791 if (bnx2x_stats_arr[j].size == 4) {
9792 /* 4-byte counter */
9793 buf[k + j] = (u64) *offset;
9794 continue;
9795 }
9796 /* 8-byte counter */
9797 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9798 }
de832a55
EG
9799 } else {
9800 hw_stats = (u32 *)&bp->eth_stats;
9801 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9802 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9803 continue;
9804 if (bnx2x_stats_arr[i].size == 0) {
9805 /* skip this counter */
9806 buf[j] = 0;
9807 j++;
9808 continue;
9809 }
9810 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9811 if (bnx2x_stats_arr[i].size == 4) {
9812 /* 4-byte counter */
9813 buf[j] = (u64) *offset;
9814 j++;
9815 continue;
9816 }
9817 /* 8-byte counter */
9818 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 9819 j++;
a2fbb9ea 9820 }
a2fbb9ea
ET
9821 }
9822}
9823
9824static int bnx2x_phys_id(struct net_device *dev, u32 data)
9825{
9826 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9827 int port = BP_PORT(bp);
a2fbb9ea
ET
9828 int i;
9829
34f80b04
EG
9830 if (!netif_running(dev))
9831 return 0;
9832
9833 if (!bp->port.pmf)
9834 return 0;
9835
a2fbb9ea
ET
9836 if (data == 0)
9837 data = 2;
9838
9839 for (i = 0; i < (data * 2); i++) {
c18487ee 9840 if ((i % 2) == 0)
34f80b04 9841 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9842 bp->link_params.hw_led_mode,
9843 bp->link_params.chip_id);
9844 else
34f80b04 9845 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9846 bp->link_params.hw_led_mode,
9847 bp->link_params.chip_id);
9848
a2fbb9ea
ET
9849 msleep_interruptible(500);
9850 if (signal_pending(current))
9851 break;
9852 }
9853
c18487ee 9854 if (bp->link_vars.link_up)
34f80b04 9855 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9856 bp->link_vars.line_speed,
9857 bp->link_params.hw_led_mode,
9858 bp->link_params.chip_id);
a2fbb9ea
ET
9859
9860 return 0;
9861}
9862
9863static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9864 .get_settings = bnx2x_get_settings,
9865 .set_settings = bnx2x_set_settings,
9866 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9867 .get_wol = bnx2x_get_wol,
9868 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9869 .get_msglevel = bnx2x_get_msglevel,
9870 .set_msglevel = bnx2x_set_msglevel,
9871 .nway_reset = bnx2x_nway_reset,
9872 .get_link = ethtool_op_get_link,
9873 .get_eeprom_len = bnx2x_get_eeprom_len,
9874 .get_eeprom = bnx2x_get_eeprom,
9875 .set_eeprom = bnx2x_set_eeprom,
9876 .get_coalesce = bnx2x_get_coalesce,
9877 .set_coalesce = bnx2x_set_coalesce,
9878 .get_ringparam = bnx2x_get_ringparam,
9879 .set_ringparam = bnx2x_set_ringparam,
9880 .get_pauseparam = bnx2x_get_pauseparam,
9881 .set_pauseparam = bnx2x_set_pauseparam,
9882 .get_rx_csum = bnx2x_get_rx_csum,
9883 .set_rx_csum = bnx2x_set_rx_csum,
9884 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9885 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9886 .set_flags = bnx2x_set_flags,
9887 .get_flags = ethtool_op_get_flags,
9888 .get_sg = ethtool_op_get_sg,
9889 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9890 .get_tso = ethtool_op_get_tso,
9891 .set_tso = bnx2x_set_tso,
9892 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9893 .self_test = bnx2x_self_test,
9894 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9895 .phys_id = bnx2x_phys_id,
9896 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9897 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9898};
9899
9900/* end of ethtool_ops */
9901
9902/****************************************************************************
9903* General service functions
9904****************************************************************************/
9905
9906static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9907{
9908 u16 pmcsr;
9909
9910 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9911
9912 switch (state) {
9913 case PCI_D0:
34f80b04 9914 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9915 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9916 PCI_PM_CTRL_PME_STATUS));
9917
9918 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9919 /* delay required during transition out of D3hot */
a2fbb9ea 9920 msleep(20);
34f80b04 9921 break;
a2fbb9ea 9922
34f80b04
EG
9923 case PCI_D3hot:
9924 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9925 pmcsr |= 3;
a2fbb9ea 9926
34f80b04
EG
9927 if (bp->wol)
9928 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9929
34f80b04
EG
9930 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9931 pmcsr);
a2fbb9ea 9932
34f80b04
EG
9933 /* No more memory access after this point until
9934 * device is brought back to D0.
9935 */
9936 break;
9937
9938 default:
9939 return -EINVAL;
9940 }
9941 return 0;
a2fbb9ea
ET
9942}
9943
237907c1
EG
9944static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9945{
9946 u16 rx_cons_sb;
9947
9948 /* Tell compiler that status block fields can change */
9949 barrier();
9950 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9951 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9952 rx_cons_sb++;
9953 return (fp->rx_comp_cons != rx_cons_sb);
9954}
9955
34f80b04
EG
9956/*
9957 * net_device service functions
9958 */
9959
a2fbb9ea
ET
9960static int bnx2x_poll(struct napi_struct *napi, int budget)
9961{
9962 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9963 napi);
9964 struct bnx2x *bp = fp->bp;
9965 int work_done = 0;
9966
9967#ifdef BNX2X_STOP_ON_ERROR
9968 if (unlikely(bp->panic))
34f80b04 9969 goto poll_panic;
a2fbb9ea
ET
9970#endif
9971
9972 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9973 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9974 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9975
9976 bnx2x_update_fpsb_idx(fp);
9977
237907c1 9978 if (bnx2x_has_tx_work(fp))
a2fbb9ea
ET
9979 bnx2x_tx_int(fp, budget);
9980
237907c1 9981 if (bnx2x_has_rx_work(fp))
a2fbb9ea 9982 work_done = bnx2x_rx_int(fp, budget);
da5a662a 9983 rmb(); /* BNX2X_HAS_WORK() reads the status block */
a2fbb9ea
ET
9984
9985 /* must not complete if we consumed full budget */
da5a662a 9986 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
a2fbb9ea
ET
9987
9988#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9989poll_panic:
a2fbb9ea 9990#endif
288379f0 9991 napi_complete(napi);
a2fbb9ea 9992
34f80b04 9993 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9994 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9995 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9996 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9997 }
a2fbb9ea
ET
9998 return work_done;
9999}
10000
755735eb
EG
10001
10002/* we split the first BD into headers and data BDs
33471629 10003 * to ease the pain of our fellow microcode engineers
755735eb
EG
10004 * we use one mapping for both BDs
10005 * So far this has only been observed to happen
10006 * in Other Operating Systems(TM)
10007 */
10008static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10009 struct bnx2x_fastpath *fp,
10010 struct eth_tx_bd **tx_bd, u16 hlen,
10011 u16 bd_prod, int nbd)
10012{
10013 struct eth_tx_bd *h_tx_bd = *tx_bd;
10014 struct eth_tx_bd *d_tx_bd;
10015 dma_addr_t mapping;
10016 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10017
10018 /* first fix first BD */
10019 h_tx_bd->nbd = cpu_to_le16(nbd);
10020 h_tx_bd->nbytes = cpu_to_le16(hlen);
10021
10022 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10023 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10024 h_tx_bd->addr_lo, h_tx_bd->nbd);
10025
10026 /* now get a new data BD
10027 * (after the pbd) and fill it */
10028 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10029 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10030
10031 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10032 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10033
10034 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10035 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10036 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10037 d_tx_bd->vlan = 0;
10038 /* this marks the BD as one that has no individual mapping
10039 * the FW ignores this flag in a BD not marked start
10040 */
10041 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10042 DP(NETIF_MSG_TX_QUEUED,
10043 "TSO split data size is %d (%x:%x)\n",
10044 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10045
10046 /* update tx_bd for marking the last BD flag */
10047 *tx_bd = d_tx_bd;
10048
10049 return bd_prod;
10050}
10051
10052static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10053{
10054 if (fix > 0)
10055 csum = (u16) ~csum_fold(csum_sub(csum,
10056 csum_partial(t_header - fix, fix, 0)));
10057
10058 else if (fix < 0)
10059 csum = (u16) ~csum_fold(csum_add(csum,
10060 csum_partial(t_header, -fix, 0)));
10061
10062 return swab16(csum);
10063}
10064
10065static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10066{
10067 u32 rc;
10068
10069 if (skb->ip_summed != CHECKSUM_PARTIAL)
10070 rc = XMIT_PLAIN;
10071
10072 else {
10073 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10074 rc = XMIT_CSUM_V6;
10075 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10076 rc |= XMIT_CSUM_TCP;
10077
10078 } else {
10079 rc = XMIT_CSUM_V4;
10080 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10081 rc |= XMIT_CSUM_TCP;
10082 }
10083 }
10084
10085 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10086 rc |= XMIT_GSO_V4;
10087
10088 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10089 rc |= XMIT_GSO_V6;
10090
10091 return rc;
10092}
10093
632da4d6 10094#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
755735eb
EG
10095/* check if packet requires linearization (packet is too fragmented) */
10096static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10097 u32 xmit_type)
10098{
10099 int to_copy = 0;
10100 int hlen = 0;
10101 int first_bd_sz = 0;
10102
10103 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10104 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10105
10106 if (xmit_type & XMIT_GSO) {
10107 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10108 /* Check if LSO packet needs to be copied:
10109 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10110 int wnd_size = MAX_FETCH_BD - 3;
33471629 10111 /* Number of windows to check */
755735eb
EG
10112 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10113 int wnd_idx = 0;
10114 int frag_idx = 0;
10115 u32 wnd_sum = 0;
10116
10117 /* Headers length */
10118 hlen = (int)(skb_transport_header(skb) - skb->data) +
10119 tcp_hdrlen(skb);
10120
10121 /* Amount of data (w/o headers) on linear part of SKB*/
10122 first_bd_sz = skb_headlen(skb) - hlen;
10123
10124 wnd_sum = first_bd_sz;
10125
10126 /* Calculate the first sum - it's special */
10127 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10128 wnd_sum +=
10129 skb_shinfo(skb)->frags[frag_idx].size;
10130
10131 /* If there was data on linear skb data - check it */
10132 if (first_bd_sz > 0) {
10133 if (unlikely(wnd_sum < lso_mss)) {
10134 to_copy = 1;
10135 goto exit_lbl;
10136 }
10137
10138 wnd_sum -= first_bd_sz;
10139 }
10140
10141 /* Others are easier: run through the frag list and
10142 check all windows */
10143 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10144 wnd_sum +=
10145 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10146
10147 if (unlikely(wnd_sum < lso_mss)) {
10148 to_copy = 1;
10149 break;
10150 }
10151 wnd_sum -=
10152 skb_shinfo(skb)->frags[wnd_idx].size;
10153 }
10154
10155 } else {
10156 /* in non-LSO too fragmented packet should always
10157 be linearized */
10158 to_copy = 1;
10159 }
10160 }
10161
10162exit_lbl:
10163 if (unlikely(to_copy))
10164 DP(NETIF_MSG_TX_QUEUED,
10165 "Linearization IS REQUIRED for %s packet. "
10166 "num_frags %d hlen %d first_bd_sz %d\n",
10167 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10168 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10169
10170 return to_copy;
10171}
632da4d6 10172#endif
755735eb
EG
10173
10174/* called with netif_tx_lock
a2fbb9ea 10175 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10176 * netif_wake_queue()
a2fbb9ea
ET
10177 */
10178static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10179{
10180 struct bnx2x *bp = netdev_priv(dev);
10181 struct bnx2x_fastpath *fp;
555f6c78 10182 struct netdev_queue *txq;
a2fbb9ea
ET
10183 struct sw_tx_bd *tx_buf;
10184 struct eth_tx_bd *tx_bd;
10185 struct eth_tx_parse_bd *pbd = NULL;
10186 u16 pkt_prod, bd_prod;
755735eb 10187 int nbd, fp_index;
a2fbb9ea 10188 dma_addr_t mapping;
755735eb
EG
10189 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10190 int vlan_off = (bp->e1hov ? 4 : 0);
10191 int i;
10192 u8 hlen = 0;
a2fbb9ea
ET
10193
10194#ifdef BNX2X_STOP_ON_ERROR
10195 if (unlikely(bp->panic))
10196 return NETDEV_TX_BUSY;
10197#endif
10198
555f6c78
EG
10199 fp_index = skb_get_queue_mapping(skb);
10200 txq = netdev_get_tx_queue(dev, fp_index);
10201
a2fbb9ea 10202 fp = &bp->fp[fp_index];
755735eb 10203
231fd58a 10204 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10205 fp->eth_q_stats.driver_xoff++,
555f6c78 10206 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10207 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10208 return NETDEV_TX_BUSY;
10209 }
10210
755735eb
EG
10211 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10212 " gso type %x xmit_type %x\n",
10213 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10214 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10215
632da4d6 10216#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 10217 /* First, check if we need to linearize the skb
755735eb
EG
10218 (due to FW restrictions) */
10219 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10220 /* Statistics of linearization */
10221 bp->lin_cnt++;
10222 if (skb_linearize(skb) != 0) {
10223 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10224 "silently dropping this SKB\n");
10225 dev_kfree_skb_any(skb);
da5a662a 10226 return NETDEV_TX_OK;
755735eb
EG
10227 }
10228 }
632da4d6 10229#endif
755735eb 10230
a2fbb9ea 10231 /*
755735eb 10232 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10233 then for TSO or xsum we have a parsing info BD,
755735eb 10234 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10235 (don't forget to mark the last one as last,
10236 and to unmap only AFTER you write to the BD ...)
755735eb 10237 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10238 */
10239
10240 pkt_prod = fp->tx_pkt_prod++;
755735eb 10241 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10242
755735eb 10243 /* get a tx_buf and first BD */
a2fbb9ea
ET
10244 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10245 tx_bd = &fp->tx_desc_ring[bd_prod];
10246
10247 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10248 tx_bd->general_data = (UNICAST_ADDRESS <<
10249 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10250 /* header nbd */
10251 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10252
755735eb
EG
10253 /* remember the first BD of the packet */
10254 tx_buf->first_bd = fp->tx_bd_prod;
10255 tx_buf->skb = skb;
a2fbb9ea
ET
10256
10257 DP(NETIF_MSG_TX_QUEUED,
10258 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10259 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10260
0c6671b0
EG
10261#ifdef BCM_VLAN
10262 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10263 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10264 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10265 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10266 vlan_off += 4;
10267 } else
0c6671b0 10268#endif
755735eb 10269 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10270
755735eb 10271 if (xmit_type) {
755735eb 10272 /* turn on parsing and get a BD */
a2fbb9ea
ET
10273 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10274 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10275
10276 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10277 }
10278
10279 if (xmit_type & XMIT_CSUM) {
10280 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10281
10282 /* for now NS flag is not used in Linux */
755735eb 10283 pbd->global_data = (hlen |
96fc1784 10284 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 10285 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10286
755735eb
EG
10287 pbd->ip_hlen = (skb_transport_header(skb) -
10288 skb_network_header(skb)) / 2;
10289
10290 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10291
755735eb
EG
10292 pbd->total_hlen = cpu_to_le16(hlen);
10293 hlen = hlen*2 - vlan_off;
a2fbb9ea 10294
755735eb
EG
10295 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10296
10297 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10298 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10299 ETH_TX_BD_FLAGS_IP_CSUM;
10300 else
10301 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10302
10303 if (xmit_type & XMIT_CSUM_TCP) {
10304 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10305
10306 } else {
10307 s8 fix = SKB_CS_OFF(skb); /* signed! */
10308
a2fbb9ea 10309 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10310 pbd->cs_offset = fix / 2;
a2fbb9ea 10311
755735eb
EG
10312 DP(NETIF_MSG_TX_QUEUED,
10313 "hlen %d offset %d fix %d csum before fix %x\n",
10314 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10315 SKB_CS(skb));
10316
10317 /* HW bug: fixup the CSUM */
10318 pbd->tcp_pseudo_csum =
10319 bnx2x_csum_fix(skb_transport_header(skb),
10320 SKB_CS(skb), fix);
10321
10322 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10323 pbd->tcp_pseudo_csum);
10324 }
a2fbb9ea
ET
10325 }
10326
10327 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10328 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10329
10330 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10331 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10332 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10333 tx_bd->nbd = cpu_to_le16(nbd);
10334 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10335
10336 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10337 " nbytes %d flags %x vlan %x\n",
10338 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10339 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10340 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10341
755735eb 10342 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10343
10344 DP(NETIF_MSG_TX_QUEUED,
10345 "TSO packet len %d hlen %d total len %d tso size %d\n",
10346 skb->len, hlen, skb_headlen(skb),
10347 skb_shinfo(skb)->gso_size);
10348
10349 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10350
755735eb
EG
10351 if (unlikely(skb_headlen(skb) > hlen))
10352 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10353 bd_prod, ++nbd);
a2fbb9ea
ET
10354
10355 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10356 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10357 pbd->tcp_flags = pbd_tcp_flags(skb);
10358
10359 if (xmit_type & XMIT_GSO_V4) {
10360 pbd->ip_id = swab16(ip_hdr(skb)->id);
10361 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10362 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10363 ip_hdr(skb)->daddr,
10364 0, IPPROTO_TCP, 0));
755735eb
EG
10365
10366 } else
10367 pbd->tcp_pseudo_csum =
10368 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10369 &ipv6_hdr(skb)->daddr,
10370 0, IPPROTO_TCP, 0));
10371
a2fbb9ea
ET
10372 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10373 }
10374
755735eb
EG
10375 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10376 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10377
755735eb
EG
10378 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10379 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10380
755735eb
EG
10381 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10382 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10383
755735eb
EG
10384 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10385 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10386 tx_bd->nbytes = cpu_to_le16(frag->size);
10387 tx_bd->vlan = cpu_to_le16(pkt_prod);
10388 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10389
755735eb
EG
10390 DP(NETIF_MSG_TX_QUEUED,
10391 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10392 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10393 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10394 }
10395
755735eb 10396 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
10397 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10398
10399 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10400 tx_bd, tx_bd->bd_flags.as_bitfield);
10401
a2fbb9ea
ET
10402 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10403
755735eb 10404 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
10405 * if the packet contains or ends with it
10406 */
10407 if (TX_BD_POFF(bd_prod) < nbd)
10408 nbd++;
10409
10410 if (pbd)
10411 DP(NETIF_MSG_TX_QUEUED,
10412 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10413 " tcp_flags %x xsum %x seq %u hlen %u\n",
10414 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10415 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10416 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10417
755735eb 10418 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10419
58f4c4cf
EG
10420 /*
10421 * Make sure that the BD data is updated before updating the producer
10422 * since FW might read the BD right after the producer is updated.
10423 * This is only applicable for weak-ordered memory model archs such
10424 * as IA-64. The following barrier is also mandatory since FW will
10425 * assumes packets must have BDs.
10426 */
10427 wmb();
10428
96fc1784
ET
10429 fp->hw_tx_prods->bds_prod =
10430 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 10431 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
10432 fp->hw_tx_prods->packets_prod =
10433 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 10434 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
10435
10436 mmiowb();
10437
755735eb 10438 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10439 dev->trans_start = jiffies;
10440
10441 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
10442 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10443 if we put Tx into XOFF state. */
10444 smp_mb();
555f6c78 10445 netif_tx_stop_queue(txq);
de832a55 10446 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10447 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10448 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10449 }
10450 fp->tx_pkt++;
10451
10452 return NETDEV_TX_OK;
10453}
10454
bb2a0f7a 10455/* called with rtnl_lock */
a2fbb9ea
ET
10456static int bnx2x_open(struct net_device *dev)
10457{
10458 struct bnx2x *bp = netdev_priv(dev);
10459
6eccabb3
EG
10460 netif_carrier_off(dev);
10461
a2fbb9ea
ET
10462 bnx2x_set_power_state(bp, PCI_D0);
10463
bb2a0f7a 10464 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10465}
10466
bb2a0f7a 10467/* called with rtnl_lock */
a2fbb9ea
ET
10468static int bnx2x_close(struct net_device *dev)
10469{
a2fbb9ea
ET
10470 struct bnx2x *bp = netdev_priv(dev);
10471
10472 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10473 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10474 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10475 if (!CHIP_REV_IS_SLOW(bp))
10476 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10477
10478 return 0;
10479}
10480
34f80b04
EG
10481/* called with netif_tx_lock from set_multicast */
10482static void bnx2x_set_rx_mode(struct net_device *dev)
10483{
10484 struct bnx2x *bp = netdev_priv(dev);
10485 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10486 int port = BP_PORT(bp);
10487
10488 if (bp->state != BNX2X_STATE_OPEN) {
10489 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10490 return;
10491 }
10492
10493 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10494
10495 if (dev->flags & IFF_PROMISC)
10496 rx_mode = BNX2X_RX_MODE_PROMISC;
10497
10498 else if ((dev->flags & IFF_ALLMULTI) ||
10499 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10500 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10501
10502 else { /* some multicasts */
10503 if (CHIP_IS_E1(bp)) {
10504 int i, old, offset;
10505 struct dev_mc_list *mclist;
10506 struct mac_configuration_cmd *config =
10507 bnx2x_sp(bp, mcast_config);
10508
10509 for (i = 0, mclist = dev->mc_list;
10510 mclist && (i < dev->mc_count);
10511 i++, mclist = mclist->next) {
10512
10513 config->config_table[i].
10514 cam_entry.msb_mac_addr =
10515 swab16(*(u16 *)&mclist->dmi_addr[0]);
10516 config->config_table[i].
10517 cam_entry.middle_mac_addr =
10518 swab16(*(u16 *)&mclist->dmi_addr[2]);
10519 config->config_table[i].
10520 cam_entry.lsb_mac_addr =
10521 swab16(*(u16 *)&mclist->dmi_addr[4]);
10522 config->config_table[i].cam_entry.flags =
10523 cpu_to_le16(port);
10524 config->config_table[i].
10525 target_table_entry.flags = 0;
10526 config->config_table[i].
10527 target_table_entry.client_id = 0;
10528 config->config_table[i].
10529 target_table_entry.vlan_id = 0;
10530
10531 DP(NETIF_MSG_IFUP,
10532 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10533 config->config_table[i].
10534 cam_entry.msb_mac_addr,
10535 config->config_table[i].
10536 cam_entry.middle_mac_addr,
10537 config->config_table[i].
10538 cam_entry.lsb_mac_addr);
10539 }
8d9c5f34 10540 old = config->hdr.length;
34f80b04
EG
10541 if (old > i) {
10542 for (; i < old; i++) {
10543 if (CAM_IS_INVALID(config->
10544 config_table[i])) {
af246401 10545 /* already invalidated */
34f80b04
EG
10546 break;
10547 }
10548 /* invalidate */
10549 CAM_INVALIDATE(config->
10550 config_table[i]);
10551 }
10552 }
10553
10554 if (CHIP_REV_IS_SLOW(bp))
10555 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10556 else
10557 offset = BNX2X_MAX_MULTICAST*(1 + port);
10558
8d9c5f34 10559 config->hdr.length = i;
34f80b04 10560 config->hdr.offset = offset;
8d9c5f34 10561 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10562 config->hdr.reserved1 = 0;
10563
10564 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10565 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10566 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10567 0);
10568 } else { /* E1H */
10569 /* Accept one or more multicasts */
10570 struct dev_mc_list *mclist;
10571 u32 mc_filter[MC_HASH_SIZE];
10572 u32 crc, bit, regidx;
10573 int i;
10574
10575 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10576
10577 for (i = 0, mclist = dev->mc_list;
10578 mclist && (i < dev->mc_count);
10579 i++, mclist = mclist->next) {
10580
7c510e4b
JB
10581 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10582 mclist->dmi_addr);
34f80b04
EG
10583
10584 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10585 bit = (crc >> 24) & 0xff;
10586 regidx = bit >> 5;
10587 bit &= 0x1f;
10588 mc_filter[regidx] |= (1 << bit);
10589 }
10590
10591 for (i = 0; i < MC_HASH_SIZE; i++)
10592 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10593 mc_filter[i]);
10594 }
10595 }
10596
10597 bp->rx_mode = rx_mode;
10598 bnx2x_set_storm_rx_mode(bp);
10599}
10600
10601/* called with rtnl_lock */
a2fbb9ea
ET
10602static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10603{
10604 struct sockaddr *addr = p;
10605 struct bnx2x *bp = netdev_priv(dev);
10606
34f80b04 10607 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10608 return -EINVAL;
10609
10610 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10611 if (netif_running(dev)) {
10612 if (CHIP_IS_E1(bp))
3101c2bc 10613 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10614 else
3101c2bc 10615 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10616 }
a2fbb9ea
ET
10617
10618 return 0;
10619}
10620
c18487ee 10621/* called with rtnl_lock */
a2fbb9ea
ET
10622static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10623{
10624 struct mii_ioctl_data *data = if_mii(ifr);
10625 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10626 int port = BP_PORT(bp);
a2fbb9ea
ET
10627 int err;
10628
10629 switch (cmd) {
10630 case SIOCGMIIPHY:
34f80b04 10631 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10632
c14423fe 10633 /* fallthrough */
c18487ee 10634
a2fbb9ea 10635 case SIOCGMIIREG: {
c18487ee 10636 u16 mii_regval;
a2fbb9ea 10637
c18487ee
YR
10638 if (!netif_running(dev))
10639 return -EAGAIN;
a2fbb9ea 10640
34f80b04 10641 mutex_lock(&bp->port.phy_mutex);
3196a88a 10642 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10643 DEFAULT_PHY_DEV_ADDR,
10644 (data->reg_num & 0x1f), &mii_regval);
10645 data->val_out = mii_regval;
34f80b04 10646 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10647 return err;
10648 }
10649
10650 case SIOCSMIIREG:
10651 if (!capable(CAP_NET_ADMIN))
10652 return -EPERM;
10653
c18487ee
YR
10654 if (!netif_running(dev))
10655 return -EAGAIN;
10656
34f80b04 10657 mutex_lock(&bp->port.phy_mutex);
3196a88a 10658 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10659 DEFAULT_PHY_DEV_ADDR,
10660 (data->reg_num & 0x1f), data->val_in);
34f80b04 10661 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10662 return err;
10663
10664 default:
10665 /* do nothing */
10666 break;
10667 }
10668
10669 return -EOPNOTSUPP;
10670}
10671
34f80b04 10672/* called with rtnl_lock */
a2fbb9ea
ET
10673static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10674{
10675 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10676 int rc = 0;
a2fbb9ea
ET
10677
10678 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10679 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10680 return -EINVAL;
10681
10682 /* This does not race with packet allocation
c14423fe 10683 * because the actual alloc size is
a2fbb9ea
ET
10684 * only updated as part of load
10685 */
10686 dev->mtu = new_mtu;
10687
10688 if (netif_running(dev)) {
34f80b04
EG
10689 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10690 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10691 }
34f80b04
EG
10692
10693 return rc;
a2fbb9ea
ET
10694}
10695
10696static void bnx2x_tx_timeout(struct net_device *dev)
10697{
10698 struct bnx2x *bp = netdev_priv(dev);
10699
10700#ifdef BNX2X_STOP_ON_ERROR
10701 if (!bp->panic)
10702 bnx2x_panic();
10703#endif
10704 /* This allows the netif to be shutdown gracefully before resetting */
10705 schedule_work(&bp->reset_task);
10706}
10707
10708#ifdef BCM_VLAN
34f80b04 10709/* called with rtnl_lock */
a2fbb9ea
ET
10710static void bnx2x_vlan_rx_register(struct net_device *dev,
10711 struct vlan_group *vlgrp)
10712{
10713 struct bnx2x *bp = netdev_priv(dev);
10714
10715 bp->vlgrp = vlgrp;
0c6671b0
EG
10716
10717 /* Set flags according to the required capabilities */
10718 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10719
10720 if (dev->features & NETIF_F_HW_VLAN_TX)
10721 bp->flags |= HW_VLAN_TX_FLAG;
10722
10723 if (dev->features & NETIF_F_HW_VLAN_RX)
10724 bp->flags |= HW_VLAN_RX_FLAG;
10725
a2fbb9ea 10726 if (netif_running(dev))
49d66772 10727 bnx2x_set_client_config(bp);
a2fbb9ea 10728}
34f80b04 10729
a2fbb9ea
ET
10730#endif
10731
10732#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10733static void poll_bnx2x(struct net_device *dev)
10734{
10735 struct bnx2x *bp = netdev_priv(dev);
10736
10737 disable_irq(bp->pdev->irq);
10738 bnx2x_interrupt(bp->pdev->irq, dev);
10739 enable_irq(bp->pdev->irq);
10740}
10741#endif
10742
c64213cd
SH
10743static const struct net_device_ops bnx2x_netdev_ops = {
10744 .ndo_open = bnx2x_open,
10745 .ndo_stop = bnx2x_close,
10746 .ndo_start_xmit = bnx2x_start_xmit,
10747 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10748 .ndo_set_mac_address = bnx2x_change_mac_addr,
10749 .ndo_validate_addr = eth_validate_addr,
10750 .ndo_do_ioctl = bnx2x_ioctl,
10751 .ndo_change_mtu = bnx2x_change_mtu,
10752 .ndo_tx_timeout = bnx2x_tx_timeout,
10753#ifdef BCM_VLAN
10754 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10755#endif
10756#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10757 .ndo_poll_controller = poll_bnx2x,
10758#endif
10759};
10760
10761
34f80b04
EG
10762static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10763 struct net_device *dev)
a2fbb9ea
ET
10764{
10765 struct bnx2x *bp;
10766 int rc;
10767
10768 SET_NETDEV_DEV(dev, &pdev->dev);
10769 bp = netdev_priv(dev);
10770
34f80b04
EG
10771 bp->dev = dev;
10772 bp->pdev = pdev;
a2fbb9ea 10773 bp->flags = 0;
34f80b04 10774 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10775
10776 rc = pci_enable_device(pdev);
10777 if (rc) {
10778 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10779 goto err_out;
10780 }
10781
10782 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10783 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10784 " aborting\n");
10785 rc = -ENODEV;
10786 goto err_out_disable;
10787 }
10788
10789 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10790 printk(KERN_ERR PFX "Cannot find second PCI device"
10791 " base address, aborting\n");
10792 rc = -ENODEV;
10793 goto err_out_disable;
10794 }
10795
34f80b04
EG
10796 if (atomic_read(&pdev->enable_cnt) == 1) {
10797 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10798 if (rc) {
10799 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10800 " aborting\n");
10801 goto err_out_disable;
10802 }
a2fbb9ea 10803
34f80b04
EG
10804 pci_set_master(pdev);
10805 pci_save_state(pdev);
10806 }
a2fbb9ea
ET
10807
10808 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10809 if (bp->pm_cap == 0) {
10810 printk(KERN_ERR PFX "Cannot find power management"
10811 " capability, aborting\n");
10812 rc = -EIO;
10813 goto err_out_release;
10814 }
10815
10816 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10817 if (bp->pcie_cap == 0) {
10818 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10819 " aborting\n");
10820 rc = -EIO;
10821 goto err_out_release;
10822 }
10823
10824 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10825 bp->flags |= USING_DAC_FLAG;
10826 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10827 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10828 " failed, aborting\n");
10829 rc = -EIO;
10830 goto err_out_release;
10831 }
10832
10833 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10834 printk(KERN_ERR PFX "System does not support DMA,"
10835 " aborting\n");
10836 rc = -EIO;
10837 goto err_out_release;
10838 }
10839
34f80b04
EG
10840 dev->mem_start = pci_resource_start(pdev, 0);
10841 dev->base_addr = dev->mem_start;
10842 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10843
10844 dev->irq = pdev->irq;
10845
275f165f 10846 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10847 if (!bp->regview) {
10848 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10849 rc = -ENOMEM;
10850 goto err_out_release;
10851 }
10852
34f80b04
EG
10853 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10854 min_t(u64, BNX2X_DB_SIZE,
10855 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10856 if (!bp->doorbells) {
10857 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10858 rc = -ENOMEM;
10859 goto err_out_unmap;
10860 }
10861
10862 bnx2x_set_power_state(bp, PCI_D0);
10863
34f80b04
EG
10864 /* clean indirect addresses */
10865 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10866 PCICFG_VENDOR_ID_OFFSET);
10867 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10868 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10869 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10870 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10871
34f80b04 10872 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10873
c64213cd 10874 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10875 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10876 dev->features |= NETIF_F_SG;
10877 dev->features |= NETIF_F_HW_CSUM;
10878 if (bp->flags & USING_DAC_FLAG)
10879 dev->features |= NETIF_F_HIGHDMA;
10880#ifdef BCM_VLAN
10881 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10882 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10883#endif
10884 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10885 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10886
10887 return 0;
10888
10889err_out_unmap:
10890 if (bp->regview) {
10891 iounmap(bp->regview);
10892 bp->regview = NULL;
10893 }
a2fbb9ea
ET
10894 if (bp->doorbells) {
10895 iounmap(bp->doorbells);
10896 bp->doorbells = NULL;
10897 }
10898
10899err_out_release:
34f80b04
EG
10900 if (atomic_read(&pdev->enable_cnt) == 1)
10901 pci_release_regions(pdev);
a2fbb9ea
ET
10902
10903err_out_disable:
10904 pci_disable_device(pdev);
10905 pci_set_drvdata(pdev, NULL);
10906
10907err_out:
10908 return rc;
10909}
10910
25047950
ET
10911static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10912{
10913 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10914
10915 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10916 return val;
10917}
10918
10919/* return value of 1=2.5GHz 2=5GHz */
10920static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10921{
10922 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10923
10924 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10925 return val;
10926}
10927
a2fbb9ea
ET
10928static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10929 const struct pci_device_id *ent)
10930{
10931 static int version_printed;
10932 struct net_device *dev = NULL;
10933 struct bnx2x *bp;
25047950 10934 int rc;
a2fbb9ea
ET
10935
10936 if (version_printed++ == 0)
10937 printk(KERN_INFO "%s", version);
10938
10939 /* dev zeroed in init_etherdev */
555f6c78 10940 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
10941 if (!dev) {
10942 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10943 return -ENOMEM;
34f80b04 10944 }
a2fbb9ea 10945
a2fbb9ea
ET
10946 bp = netdev_priv(dev);
10947 bp->msglevel = debug;
10948
34f80b04 10949 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10950 if (rc < 0) {
10951 free_netdev(dev);
10952 return rc;
10953 }
10954
a2fbb9ea
ET
10955 pci_set_drvdata(pdev, dev);
10956
34f80b04 10957 rc = bnx2x_init_bp(bp);
693fc0d1
EG
10958 if (rc)
10959 goto init_one_exit;
10960
10961 rc = register_netdev(dev);
34f80b04 10962 if (rc) {
693fc0d1 10963 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
10964 goto init_one_exit;
10965 }
10966
25047950 10967 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 10968 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 10969 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10970 bnx2x_get_pcie_width(bp),
10971 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10972 dev->base_addr, bp->pdev->irq);
e174961c 10973 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10974 return 0;
34f80b04
EG
10975
10976init_one_exit:
10977 if (bp->regview)
10978 iounmap(bp->regview);
10979
10980 if (bp->doorbells)
10981 iounmap(bp->doorbells);
10982
10983 free_netdev(dev);
10984
10985 if (atomic_read(&pdev->enable_cnt) == 1)
10986 pci_release_regions(pdev);
10987
10988 pci_disable_device(pdev);
10989 pci_set_drvdata(pdev, NULL);
10990
10991 return rc;
a2fbb9ea
ET
10992}
10993
10994static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10995{
10996 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10997 struct bnx2x *bp;
10998
10999 if (!dev) {
228241eb
ET
11000 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11001 return;
11002 }
228241eb 11003 bp = netdev_priv(dev);
a2fbb9ea 11004
a2fbb9ea
ET
11005 unregister_netdev(dev);
11006
11007 if (bp->regview)
11008 iounmap(bp->regview);
11009
11010 if (bp->doorbells)
11011 iounmap(bp->doorbells);
11012
11013 free_netdev(dev);
34f80b04
EG
11014
11015 if (atomic_read(&pdev->enable_cnt) == 1)
11016 pci_release_regions(pdev);
11017
a2fbb9ea
ET
11018 pci_disable_device(pdev);
11019 pci_set_drvdata(pdev, NULL);
11020}
11021
11022static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11023{
11024 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11025 struct bnx2x *bp;
11026
34f80b04
EG
11027 if (!dev) {
11028 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11029 return -ENODEV;
11030 }
11031 bp = netdev_priv(dev);
a2fbb9ea 11032
34f80b04 11033 rtnl_lock();
a2fbb9ea 11034
34f80b04 11035 pci_save_state(pdev);
228241eb 11036
34f80b04
EG
11037 if (!netif_running(dev)) {
11038 rtnl_unlock();
11039 return 0;
11040 }
a2fbb9ea
ET
11041
11042 netif_device_detach(dev);
a2fbb9ea 11043
da5a662a 11044 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11045
a2fbb9ea 11046 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11047
34f80b04
EG
11048 rtnl_unlock();
11049
a2fbb9ea
ET
11050 return 0;
11051}
11052
11053static int bnx2x_resume(struct pci_dev *pdev)
11054{
11055 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11056 struct bnx2x *bp;
a2fbb9ea
ET
11057 int rc;
11058
228241eb
ET
11059 if (!dev) {
11060 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11061 return -ENODEV;
11062 }
228241eb 11063 bp = netdev_priv(dev);
a2fbb9ea 11064
34f80b04
EG
11065 rtnl_lock();
11066
228241eb 11067 pci_restore_state(pdev);
34f80b04
EG
11068
11069 if (!netif_running(dev)) {
11070 rtnl_unlock();
11071 return 0;
11072 }
11073
a2fbb9ea
ET
11074 bnx2x_set_power_state(bp, PCI_D0);
11075 netif_device_attach(dev);
11076
da5a662a 11077 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11078
34f80b04
EG
11079 rtnl_unlock();
11080
11081 return rc;
a2fbb9ea
ET
11082}
11083
f8ef6e44
YG
11084static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11085{
11086 int i;
11087
11088 bp->state = BNX2X_STATE_ERROR;
11089
11090 bp->rx_mode = BNX2X_RX_MODE_NONE;
11091
11092 bnx2x_netif_stop(bp, 0);
11093
11094 del_timer_sync(&bp->timer);
11095 bp->stats_state = STATS_STATE_DISABLED;
11096 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11097
11098 /* Release IRQs */
11099 bnx2x_free_irq(bp);
11100
11101 if (CHIP_IS_E1(bp)) {
11102 struct mac_configuration_cmd *config =
11103 bnx2x_sp(bp, mcast_config);
11104
8d9c5f34 11105 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
11106 CAM_INVALIDATE(config->config_table[i]);
11107 }
11108
11109 /* Free SKBs, SGEs, TPA pool and driver internals */
11110 bnx2x_free_skbs(bp);
555f6c78 11111 for_each_rx_queue(bp, i)
f8ef6e44 11112 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11113 for_each_rx_queue(bp, i)
7cde1c8b 11114 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11115 bnx2x_free_mem(bp);
11116
11117 bp->state = BNX2X_STATE_CLOSED;
11118
11119 netif_carrier_off(bp->dev);
11120
11121 return 0;
11122}
11123
11124static void bnx2x_eeh_recover(struct bnx2x *bp)
11125{
11126 u32 val;
11127
11128 mutex_init(&bp->port.phy_mutex);
11129
11130 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11131 bp->link_params.shmem_base = bp->common.shmem_base;
11132 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11133
11134 if (!bp->common.shmem_base ||
11135 (bp->common.shmem_base < 0xA0000) ||
11136 (bp->common.shmem_base >= 0xC0000)) {
11137 BNX2X_DEV_INFO("MCP not active\n");
11138 bp->flags |= NO_MCP_FLAG;
11139 return;
11140 }
11141
11142 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11143 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11144 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11145 BNX2X_ERR("BAD MCP validity signature\n");
11146
11147 if (!BP_NOMCP(bp)) {
11148 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11149 & DRV_MSG_SEQ_NUMBER_MASK);
11150 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11151 }
11152}
11153
493adb1f
WX
11154/**
11155 * bnx2x_io_error_detected - called when PCI error is detected
11156 * @pdev: Pointer to PCI device
11157 * @state: The current pci connection state
11158 *
11159 * This function is called after a PCI bus error affecting
11160 * this device has been detected.
11161 */
11162static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11163 pci_channel_state_t state)
11164{
11165 struct net_device *dev = pci_get_drvdata(pdev);
11166 struct bnx2x *bp = netdev_priv(dev);
11167
11168 rtnl_lock();
11169
11170 netif_device_detach(dev);
11171
11172 if (netif_running(dev))
f8ef6e44 11173 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11174
11175 pci_disable_device(pdev);
11176
11177 rtnl_unlock();
11178
11179 /* Request a slot reset */
11180 return PCI_ERS_RESULT_NEED_RESET;
11181}
11182
11183/**
11184 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11185 * @pdev: Pointer to PCI device
11186 *
11187 * Restart the card from scratch, as if from a cold-boot.
11188 */
11189static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11190{
11191 struct net_device *dev = pci_get_drvdata(pdev);
11192 struct bnx2x *bp = netdev_priv(dev);
11193
11194 rtnl_lock();
11195
11196 if (pci_enable_device(pdev)) {
11197 dev_err(&pdev->dev,
11198 "Cannot re-enable PCI device after reset\n");
11199 rtnl_unlock();
11200 return PCI_ERS_RESULT_DISCONNECT;
11201 }
11202
11203 pci_set_master(pdev);
11204 pci_restore_state(pdev);
11205
11206 if (netif_running(dev))
11207 bnx2x_set_power_state(bp, PCI_D0);
11208
11209 rtnl_unlock();
11210
11211 return PCI_ERS_RESULT_RECOVERED;
11212}
11213
11214/**
11215 * bnx2x_io_resume - called when traffic can start flowing again
11216 * @pdev: Pointer to PCI device
11217 *
11218 * This callback is called when the error recovery driver tells us that
11219 * its OK to resume normal operation.
11220 */
11221static void bnx2x_io_resume(struct pci_dev *pdev)
11222{
11223 struct net_device *dev = pci_get_drvdata(pdev);
11224 struct bnx2x *bp = netdev_priv(dev);
11225
11226 rtnl_lock();
11227
f8ef6e44
YG
11228 bnx2x_eeh_recover(bp);
11229
493adb1f 11230 if (netif_running(dev))
f8ef6e44 11231 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11232
11233 netif_device_attach(dev);
11234
11235 rtnl_unlock();
11236}
11237
11238static struct pci_error_handlers bnx2x_err_handler = {
11239 .error_detected = bnx2x_io_error_detected,
11240 .slot_reset = bnx2x_io_slot_reset,
11241 .resume = bnx2x_io_resume,
11242};
11243
a2fbb9ea 11244static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11245 .name = DRV_MODULE_NAME,
11246 .id_table = bnx2x_pci_tbl,
11247 .probe = bnx2x_init_one,
11248 .remove = __devexit_p(bnx2x_remove_one),
11249 .suspend = bnx2x_suspend,
11250 .resume = bnx2x_resume,
11251 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11252};
11253
11254static int __init bnx2x_init(void)
11255{
1cf167f2
EG
11256 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11257 if (bnx2x_wq == NULL) {
11258 printk(KERN_ERR PFX "Cannot create workqueue\n");
11259 return -ENOMEM;
11260 }
11261
a2fbb9ea
ET
11262 return pci_register_driver(&bnx2x_pci_driver);
11263}
11264
11265static void __exit bnx2x_cleanup(void)
11266{
11267 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11268
11269 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11270}
11271
11272module_init(bnx2x_init);
11273module_exit(bnx2x_cleanup);
11274
This page took 0.831907 seconds and 5 git commands to generate.