/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

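/* The DMAE block copies data between host (PCI) memory and chip (GRC)
 * register space without CPU involvement.  bnx2x_write_dmae() below
 * builds a PCI->GRC command, posts it to one of the 16 command cells
 * listed in dmae_reg_go_c[], and polls a write-back completion word in
 * slowpath memory.  Until the engine is initialized (bp->dmae_ready),
 * the copy falls back to slow indirect register accesses.
 */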
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

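/* The chip's four storm processors (X/T/C/U) each keep a list of firmware
 * assertions in internal memory.  bnx2x_mc_assert() walks all four lists,
 * prints every valid entry (four 32-bit rows per assertion) and returns
 * the number of assertions found; it is called from the crash dump path.
 */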
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

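/* Status-block interrupts are acknowledged by writing a single 32-bit
 * igu_ack_register word to the HC command register.  The word carries
 * the status block id, storm id and new consumer index, and encodes
 * whether to update the index and enable or disable the interrupt.
 */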
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

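/* On the TX side each packet occupies a chain of buffer descriptors (BDs):
 * the first BD maps the linear data, an optional parse BD and TSO split
 * header BD carry no mapping, and one more BD follows per page fragment.
 * tx_bd->nbd holds the chain length, which tells bnx2x_free_tx_pkt() how
 * many BDs to walk and unmap when completing a packet.
 */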
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

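/* TX ring occupancy helper: "used" counts the BDs between cons and prod
 * plus the NUM_TX_RINGS "next-page" entries that can never carry data,
 * so the return value is a safe estimate of the free BDs.  For example,
 * with prod == cons an idle ring reports tx_ring_size - NUM_TX_RINGS.
 */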
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

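/* The SGE ring is tracked by a bitmask (fp->sge_mask), one bit per entry,
 * initialized to all 1-s.  Completions clear the bits of the pages they
 * consumed; bnx2x_update_sge_prod() then advances rx_sge_prod across each
 * 64-bit mask element that has gone entirely to zero, re-arming its bits
 * for reuse.
 */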
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

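/* TPA is the chip's LRO-style receive aggregation (see the disable_tpa
 * module parameter).  A TPA_START completion parks the current skb in a
 * per-queue bin (fp->tpa_pool) while subsequent segments accumulate in
 * SGE pages; the matching TPA_END is handled by bnx2x_tpa_stop(), which
 * attaches the pages as frags and hands the aggregated skb to the stack.
 */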
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

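/* Main RX handler: walks the RX completion queue up to the consumer index
 * reported in the status block.  Each CQE is either a slowpath (ramrod)
 * event, a TPA start/end marker, or a regular packet; regular packets are
 * either copied into a fresh skb (small frames on a large MTU) or passed
 * up directly with a replacement buffer posted to the BD ring.
 */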
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

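/* In MSI-X mode each fastpath queue owns its own vector, so the handler
 * below only disables further interrupts for its status block and kicks
 * NAPI.  The shared INTx/MSI handler (bnx2x_interrupt) instead decodes a
 * status bitmask: bit 0 signals the slowpath and bit (sb_id + 1) signals
 * a fastpath status block.
 */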
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

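/* Resources shared between the two ports/functions (MDIO bus, GPIOs, etc.)
 * are arbitrated through per-function lock registers in the MISC block:
 * writing the resource bit to the "set" register (base + 4) attempts to
 * take the lock, reading the base register back confirms ownership, and
 * writing the bit to the base register releases it.
 */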
4a37fb66 1737static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1738{
1739 u32 lock_status;
1740 u32 resource_bit = (1 << resource);
4a37fb66
YG
1741 int func = BP_FUNC(bp);
1742 u32 hw_lock_control_reg;
c18487ee 1743 int cnt;
a2fbb9ea 1744
c18487ee
YR
1745 /* Validating that the resource is within range */
1746 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1747 DP(NETIF_MSG_HW,
1748 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1749 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1750 return -EINVAL;
1751 }
a2fbb9ea 1752
4a37fb66
YG
1753 if (func <= 5) {
1754 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1755 } else {
1756 hw_lock_control_reg =
1757 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1758 }
1759
c18487ee 1760 /* Validating that the resource is not already taken */
4a37fb66 1761 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1762 if (lock_status & resource_bit) {
1763 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1764 lock_status, resource_bit);
1765 return -EEXIST;
1766 }
a2fbb9ea 1767
46230476
EG
1769 /* Try for 5 seconds, polling every 5 ms */
1769 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1770 /* Try to acquire the lock */
4a37fb66
YG
1771 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1772 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1773 if (lock_status & resource_bit)
1774 return 0;
a2fbb9ea 1775
c18487ee 1776 msleep(5);
a2fbb9ea 1777 }
c18487ee
YR
1778 DP(NETIF_MSG_HW, "Timeout\n");
1779 return -EAGAIN;
1780}
a2fbb9ea 1781
4a37fb66 1782static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1783{
1784 u32 lock_status;
1785 u32 resource_bit = (1 << resource);
4a37fb66
YG
1786 int func = BP_FUNC(bp);
1787 u32 hw_lock_control_reg;
a2fbb9ea 1788
c18487ee
YR
1789 /* Validating that the resource is within range */
1790 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1791 DP(NETIF_MSG_HW,
1792 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1793 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1794 return -EINVAL;
1795 }
1796
4a37fb66
YG
1797 if (func <= 5) {
1798 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1799 } else {
1800 hw_lock_control_reg =
1801 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1802 }
1803
c18487ee 1804 /* Validating that the resource is currently taken */
4a37fb66 1805 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1806 if (!(lock_status & resource_bit)) {
1807 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1808 lock_status, resource_bit);
1809 return -EFAULT;
a2fbb9ea
ET
1810 }
1811
4a37fb66 1812 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1813 return 0;
1814}
1815
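/* A minimal usage sketch of the two helpers above (hypothetical
 * caller, not part of the driver): the acquire/release pair brackets
 * any access to a resource shared with the MCP or the other port.
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO))
 *		return;		(-EAGAIN after ~5s of 5 ms polls)
 *	... touch the shared GPIO registers ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */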
1816/* HW Lock for shared dual port PHYs */
4a37fb66 1817static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1818{
34f80b04 1819 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1820
46c6a674
EG
1821 if (bp->port.need_hw_lock)
1822 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1823}
a2fbb9ea 1824
4a37fb66 1825static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1826{
46c6a674
EG
1827 if (bp->port.need_hw_lock)
1828 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1829
34f80b04 1830 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1831}
a2fbb9ea 1832
4acac6a5
EG
1833int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1834{
1835 /* The GPIO should be swapped if swap register is set and active */
1836 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1837 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1838 int gpio_shift = gpio_num +
1839 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1840 u32 gpio_mask = (1 << gpio_shift);
1841 u32 gpio_reg;
1842 int value;
1843
1844 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1845 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1846 return -EINVAL;
1847 }
1848
1849 /* read GPIO value */
1850 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1851
1852 /* get the requested pin value */
1853 if ((gpio_reg & gpio_mask) == gpio_mask)
1854 value = 1;
1855 else
1856 value = 0;
1857
1858 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1859
1860 return value;
1861}
1862
17de50b7 1863int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1864{
1865 /* The GPIO should be swapped if swap register is set and active */
1866 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1867 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1868 int gpio_shift = gpio_num +
1869 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1870 u32 gpio_mask = (1 << gpio_shift);
1871 u32 gpio_reg;
a2fbb9ea 1872
c18487ee
YR
1873 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1874 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1875 return -EINVAL;
1876 }
a2fbb9ea 1877
4a37fb66 1878 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1879 /* read GPIO and mask except the float bits */
1880 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1881
c18487ee
YR
1882 switch (mode) {
1883 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1884 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1885 gpio_num, gpio_shift);
1886 /* clear FLOAT and set CLR */
1887 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1888 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1889 break;
a2fbb9ea 1890
c18487ee
YR
1891 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1892 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1893 gpio_num, gpio_shift);
1894 /* clear FLOAT and set SET */
1895 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1896 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1897 break;
a2fbb9ea 1898
17de50b7 1899 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1900 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1901 gpio_num, gpio_shift);
1902 /* set FLOAT */
1903 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1904 break;
a2fbb9ea 1905
c18487ee
YR
1906 default:
1907 break;
a2fbb9ea
ET
1908 }
1909
c18487ee 1910 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1911 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1912
c18487ee 1913 return 0;
a2fbb9ea
ET
1914}
1915
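/* A worked example of the shift arithmetic above, assuming
 * MISC_REGISTERS_GPIO_PORT_SHIFT is 4 (as in bnx2x headers of this
 * era): setting GPIO 2 on the swapped port gives
 *
 *	gpio_shift = 2 + 4 = 6,  gpio_mask = 1 << 6 = 0x40
 *
 * so MISC_REGISTERS_GPIO_OUTPUT_HIGH clears bit 6 of the FLOAT field
 * and sets bit 6 of the SET field in MISC_REG_GPIO.
 */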
4acac6a5
EG
1916int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1917{
1918 /* The GPIO should be swapped if swap register is set and active */
1919 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1920 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1921 int gpio_shift = gpio_num +
1922 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1923 u32 gpio_mask = (1 << gpio_shift);
1924 u32 gpio_reg;
1925
1926 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1927 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1928 return -EINVAL;
1929 }
1930
1931 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1932 /* read GPIO int */
1933 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1934
1935 switch (mode) {
1936 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1937 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1938 "output low\n", gpio_num, gpio_shift);
1939 /* clear SET and set CLR */
1940 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1941 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1942 break;
1943
1944 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1945 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1946 "output high\n", gpio_num, gpio_shift);
1947 /* clear CLR and set SET */
1948 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1949 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1950 break;
1951
1952 default:
1953 break;
1954 }
1955
1956 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1957 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1958
1959 return 0;
1960}
1961
c18487ee 1962static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1963{
c18487ee
YR
1964 u32 spio_mask = (1 << spio_num);
1965 u32 spio_reg;
a2fbb9ea 1966
c18487ee
YR
1967 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1968 (spio_num > MISC_REGISTERS_SPIO_7)) {
1969 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1970 return -EINVAL;
a2fbb9ea
ET
1971 }
1972
4a37fb66 1973 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1974 /* read SPIO and mask except the float bits */
1975 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1976
c18487ee 1977 switch (mode) {
6378c025 1978 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1979 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1980 /* clear FLOAT and set CLR */
1981 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1982 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1983 break;
a2fbb9ea 1984
6378c025 1985 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1986 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1987 /* clear FLOAT and set SET */
1988 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1989 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1990 break;
a2fbb9ea 1991
c18487ee
YR
1992 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1993 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1994 /* set FLOAT */
1995 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1996 break;
a2fbb9ea 1997
c18487ee
YR
1998 default:
1999 break;
a2fbb9ea
ET
2000 }
2001
c18487ee 2002 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2003 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2004
a2fbb9ea
ET
2005 return 0;
2006}
2007
c18487ee 2008static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2009{
ad33ea3a
EG
2010 switch (bp->link_vars.ieee_fc &
2011 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2012 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2013 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2014 ADVERTISED_Pause);
2015 break;
2016 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2017 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2018 ADVERTISED_Pause);
2019 break;
2020 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2021 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
2022 break;
2023 default:
34f80b04 2024 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2025 ADVERTISED_Pause);
2026 break;
2027 }
2028}
f1410647 2029
c18487ee
YR
2030static void bnx2x_link_report(struct bnx2x *bp)
2031{
2032 if (bp->link_vars.link_up) {
2033 if (bp->state == BNX2X_STATE_OPEN)
2034 netif_carrier_on(bp->dev);
2035 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2036
c18487ee 2037 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2038
c18487ee
YR
2039 if (bp->link_vars.duplex == DUPLEX_FULL)
2040 printk("full duplex");
2041 else
2042 printk("half duplex");
f1410647 2043
c0700f90
DM
2044 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2045 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2046 printk(", receive ");
c0700f90 2047 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2048 printk("& transmit ");
2049 } else {
2050 printk(", transmit ");
2051 }
2052 printk("flow control ON");
2053 }
2054 printk("\n");
f1410647 2055
c18487ee
YR
2056 } else { /* link_down */
2057 netif_carrier_off(bp->dev);
2058 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2059 }
c18487ee
YR
2060}
2061
2062static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2063{
19680c48
EG
2064 if (!BP_NOMCP(bp)) {
2065 u8 rc;
a2fbb9ea 2066
19680c48 2067 /* Initialize link parameters structure variables */
8c99e7b0
YR
2068 /* It is recommended to turn off RX FC for jumbo frames
2069 for better performance */
2070 if (IS_E1HMF(bp))
c0700f90 2071 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2072 else if (bp->dev->mtu > 5000)
c0700f90 2073 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2074 else
c0700f90 2075 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2076
4a37fb66 2077 bnx2x_acquire_phy_lock(bp);
19680c48 2078 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2079 bnx2x_release_phy_lock(bp);
a2fbb9ea 2080
3c96c68b
EG
2081 bnx2x_calc_fc_adv(bp);
2082
19680c48
EG
2083 if (bp->link_vars.link_up)
2084 bnx2x_link_report(bp);
a2fbb9ea 2085
34f80b04 2086
19680c48
EG
2087 return rc;
2088 }
2089 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2090 return -EINVAL;
a2fbb9ea
ET
2091}
2092
c18487ee 2093static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2094{
19680c48 2095 if (!BP_NOMCP(bp)) {
4a37fb66 2096 bnx2x_acquire_phy_lock(bp);
19680c48 2097 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2098 bnx2x_release_phy_lock(bp);
a2fbb9ea 2099
19680c48
EG
2100 bnx2x_calc_fc_adv(bp);
2101 } else
2102 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 2103}
a2fbb9ea 2104
c18487ee
YR
2105static void bnx2x__link_reset(struct bnx2x *bp)
2106{
19680c48 2107 if (!BP_NOMCP(bp)) {
4a37fb66 2108 bnx2x_acquire_phy_lock(bp);
589abe3a 2109 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2110 bnx2x_release_phy_lock(bp);
19680c48
EG
2111 } else
2112 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2113}
a2fbb9ea 2114
c18487ee
YR
2115static u8 bnx2x_link_test(struct bnx2x *bp)
2116{
2117 u8 rc;
a2fbb9ea 2118
4a37fb66 2119 bnx2x_acquire_phy_lock(bp);
c18487ee 2120 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2121 bnx2x_release_phy_lock(bp);
a2fbb9ea 2122
c18487ee
YR
2123 return rc;
2124}
a2fbb9ea 2125
8a1c38d1 2126static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2127{
8a1c38d1
EG
2128 u32 r_param = bp->link_vars.line_speed / 8;
2129 u32 fair_periodic_timeout_usec;
2130 u32 t_fair;
34f80b04 2131
8a1c38d1
EG
2132 memset(&(bp->cmng.rs_vars), 0,
2133 sizeof(struct rate_shaping_vars_per_port));
2134 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2135
8a1c38d1
EG
2136 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2137 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2138
8a1c38d1
EG
2139 /* this is the threshold below which no timer arming will occur;
2140 the 1.25 coefficient makes the threshold a little bigger
2141 than the real time, to compensate for timer inaccuracy */
2142 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2143 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2144
8a1c38d1
EG
2145 /* resolution of fairness timer */
2146 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2147 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2148 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2149
8a1c38d1
EG
2150 /* this is the threshold below which we won't arm the timer anymore */
2151 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2152
8a1c38d1
EG
2153 /* we multiply by 1e3/8 to get bytes/msec.
2154 We don't want the credits to exceed
2155 t_fair*FAIR_MEM (the algorithm resolution) */
2156 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2157 /* since each tick is 4 usec */
2158 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2159}
2160
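/* A worked pass through the arithmetic above for a 10G link
 * (line_speed == 10000 Mbps), taking RS_PERIODIC_TIMEOUT_USEC as 100
 * and T_FAIR_COEF as 10^7, the values the comments above imply:
 *
 *	r_param      = 10000 / 8    = 1250 bytes/usec
 *	t_fair       = 10^7 / 10000 = 1000 usec   (the 10G case above)
 *	rs_threshold = 100 * 1250 * 5/4 = 156250 bytes,
 *		       i.e. 1.25 times the bytes of one shaping period
 */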
8a1c38d1 2161static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2162{
2163 struct rate_shaping_vars_per_vn m_rs_vn;
2164 struct fairness_vars_per_vn m_fair_vn;
2165 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2166 u16 vn_min_rate, vn_max_rate;
2167 int i;
2168
2169 /* If the function is hidden - set min and max to zero */
2170 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2171 vn_min_rate = 0;
2172 vn_max_rate = 0;
2173
2174 } else {
2175 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2176 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2177 /* If fairness is enabled (not all min rates are zero) and
34f80b04 2178 the current min rate is zero - set it to 1.
33471629 2179 This is a requirement of the algorithm. */
8a1c38d1 2180 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2181 vn_min_rate = DEF_MIN_RATE;
2182 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2183 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2184 }
2185
8a1c38d1
EG
2186 DP(NETIF_MSG_IFUP,
2187 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2188 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2189
2190 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2191 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2192
2193 /* global vn counter - maximal Mbps for this vn */
2194 m_rs_vn.vn_counter.rate = vn_max_rate;
2195
2196 /* quota - number of bytes transmitted in this period */
2197 m_rs_vn.vn_counter.quota =
2198 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2199
8a1c38d1 2200 if (bp->vn_weight_sum) {
34f80b04
EG
2201 /* credit for each period of the fairness algorithm:
2202 number of bytes in T_FAIR (the VNs share the port rate).
8a1c38d1
EG
2203 vn_weight_sum should not be larger than 10000, thus
2204 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2205 than zero */
34f80b04 2206 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2207 max((u32)(vn_min_rate * (T_FAIR_COEF /
2208 (8 * bp->vn_weight_sum))),
2209 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2210 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2211 m_fair_vn.vn_credit_delta);
2212 }
2213
34f80b04
EG
2214 /* Store it to internal memory */
2215 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2216 REG_WR(bp, BAR_XSTRORM_INTMEM +
2217 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2218 ((u32 *)(&m_rs_vn))[i]);
2219
2220 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2221 REG_WR(bp, BAR_XSTRORM_INTMEM +
2222 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2223 ((u32 *)(&m_fair_vn))[i]);
2224}
2225
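/* A decoding example for the config word above (field values
 * hypothetical): the MIN_BW/MAX_BW fields are in units of 100 Mbps,
 * so a min field of 25 and a max field of 100 give
 *
 *	vn_min_rate = 25 * 100  = 2500 Mbps
 *	vn_max_rate = 100 * 100 = 10000 Mbps
 *	quota       = 10000 * RS_PERIODIC_TIMEOUT_USEC / 8
 *		    = 125000 bytes per 100 usec period
 */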
8a1c38d1 2226
c18487ee
YR
2227/* This function is called upon link interrupt */
2228static void bnx2x_link_attn(struct bnx2x *bp)
2229{
bb2a0f7a
YG
2230 /* Make sure that we are synced with the current statistics */
2231 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2232
c18487ee 2233 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2234
bb2a0f7a
YG
2235 if (bp->link_vars.link_up) {
2236
1c06328c
EG
2237 /* dropless flow control */
2238 if (CHIP_IS_E1H(bp)) {
2239 int port = BP_PORT(bp);
2240 u32 pause_enabled = 0;
2241
2242 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2243 pause_enabled = 1;
2244
2245 REG_WR(bp, BAR_USTRORM_INTMEM +
2246 USTORM_PAUSE_ENABLED_OFFSET(port),
2247 pause_enabled);
2248 }
2249
bb2a0f7a
YG
2250 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2251 struct host_port_stats *pstats;
2252
2253 pstats = bnx2x_sp(bp, port_stats);
2254 /* reset old bmac stats */
2255 memset(&(pstats->mac_stx[0]), 0,
2256 sizeof(struct mac_stx));
2257 }
2258 if ((bp->state == BNX2X_STATE_OPEN) ||
2259 (bp->state == BNX2X_STATE_DISABLED))
2260 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2261 }
2262
c18487ee
YR
2263 /* indicate link status */
2264 bnx2x_link_report(bp);
34f80b04
EG
2265
2266 if (IS_E1HMF(bp)) {
8a1c38d1 2267 int port = BP_PORT(bp);
34f80b04 2268 int func;
8a1c38d1 2269 int vn;
34f80b04
EG
2270
2271 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2272 if (vn == BP_E1HVN(bp))
2273 continue;
2274
8a1c38d1 2275 func = ((vn << 1) | port);
34f80b04
EG
2276
2277 /* Set the attention towards other drivers
2278 on the same port */
2279 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2280 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2281 }
34f80b04 2282
8a1c38d1
EG
2283 if (bp->link_vars.link_up) {
2284 int i;
2285
2286 /* Init rate shaping and fairness contexts */
2287 bnx2x_init_port_minmax(bp);
34f80b04 2288
34f80b04 2289 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2290 bnx2x_init_vn_minmax(bp, 2*vn + port);
2291
2292 /* Store it to internal memory */
2293 for (i = 0;
2294 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2295 REG_WR(bp, BAR_XSTRORM_INTMEM +
2296 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2297 ((u32 *)(&bp->cmng))[i]);
2298 }
34f80b04 2299 }
c18487ee 2300}
a2fbb9ea 2301
c18487ee
YR
2302static void bnx2x__link_status_update(struct bnx2x *bp)
2303{
2304 if (bp->state != BNX2X_STATE_OPEN)
2305 return;
a2fbb9ea 2306
c18487ee 2307 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2308
bb2a0f7a
YG
2309 if (bp->link_vars.link_up)
2310 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2311 else
2312 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2313
c18487ee
YR
2314 /* indicate link status */
2315 bnx2x_link_report(bp);
a2fbb9ea 2316}
a2fbb9ea 2317
34f80b04
EG
2318static void bnx2x_pmf_update(struct bnx2x *bp)
2319{
2320 int port = BP_PORT(bp);
2321 u32 val;
2322
2323 bp->port.pmf = 1;
2324 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2325
2326 /* enable nig attention */
2327 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2328 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2329 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2330
2331 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2332}
2333
c18487ee 2334/* end of Link */
a2fbb9ea
ET
2335
2336/* slow path */
2337
2338/*
2339 * General service functions
2340 */
2341
2342/* the slow path queue is odd since completions arrive on the fastpath ring */
2343static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2344 u32 data_hi, u32 data_lo, int common)
2345{
34f80b04 2346 int func = BP_FUNC(bp);
a2fbb9ea 2347
34f80b04
EG
2348 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2349 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2350 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2351 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2352 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2353
2354#ifdef BNX2X_STOP_ON_ERROR
2355 if (unlikely(bp->panic))
2356 return -EIO;
2357#endif
2358
34f80b04 2359 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2360
2361 if (!bp->spq_left) {
2362 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2363 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2364 bnx2x_panic();
2365 return -EBUSY;
2366 }
f1410647 2367
a2fbb9ea
ET
2368 /* CID needs port number to be encoded in it */
2369 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2370 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2371 HW_CID(bp, cid)));
2372 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2373 if (common)
2374 bp->spq_prod_bd->hdr.type |=
2375 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2376
2377 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2378 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2379
2380 bp->spq_left--;
2381
2382 if (bp->spq_prod_bd == bp->spq_last_bd) {
2383 bp->spq_prod_bd = bp->spq;
2384 bp->spq_prod_idx = 0;
2385 DP(NETIF_MSG_TIMER, "end of spq\n");
2386
2387 } else {
2388 bp->spq_prod_bd++;
2389 bp->spq_prod_idx++;
2390 }
2391
34f80b04 2392 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2393 bp->spq_prod_idx);
2394
34f80b04 2395 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2396 return 0;
2397}
2398
2399/* acquire split MCP access lock register */
4a37fb66 2400static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2401{
a2fbb9ea 2402 u32 i, j, val;
34f80b04 2403 int rc = 0;
a2fbb9ea
ET
2404
2405 might_sleep();
2406 i = 100;
2407 for (j = 0; j < i*10; j++) {
2408 val = (1UL << 31);
2409 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2410 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2411 if (val & (1L << 31))
2412 break;
2413
2414 msleep(5);
2415 }
a2fbb9ea 2416 if (!(val & (1L << 31))) {
19680c48 2417 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2418 rc = -EBUSY;
2419 }
2420
2421 return rc;
2422}
2423
4a37fb66
YG
2424/* release split MCP access lock register */
2425static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2426{
2427 u32 val = 0;
2428
2429 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2430}
2431
2432static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2433{
2434 struct host_def_status_block *def_sb = bp->def_status_blk;
2435 u16 rc = 0;
2436
2437 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2438 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2439 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2440 rc |= 1;
2441 }
2442 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2443 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2444 rc |= 2;
2445 }
2446 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2447 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2448 rc |= 4;
2449 }
2450 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2451 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2452 rc |= 8;
2453 }
2454 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2455 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2456 rc |= 16;
2457 }
2458 return rc;
2459}
2460
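/* The value returned by bnx2x_update_dsb_idx() above is a bitmap of
 * which default status block indices moved since the last pass:
 * bit 0 - attention bits (tested as "status & 0x1" in bnx2x_sp_task()),
 * bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 */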
2461/*
2462 * slow path service functions
2463 */
2464
2465static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2466{
34f80b04 2467 int port = BP_PORT(bp);
5c862848
EG
2468 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2469 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2470 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2471 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2472 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2473 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2474 u32 aeu_mask;
87942b46 2475 u32 nig_mask = 0;
a2fbb9ea 2476
a2fbb9ea
ET
2477 if (bp->attn_state & asserted)
2478 BNX2X_ERR("IGU ERROR\n");
2479
3fcaf2e5
EG
2480 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2481 aeu_mask = REG_RD(bp, aeu_addr);
2482
a2fbb9ea 2483 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2484 aeu_mask, asserted);
2485 aeu_mask &= ~(asserted & 0xff);
2486 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2487
3fcaf2e5
EG
2488 REG_WR(bp, aeu_addr, aeu_mask);
2489 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2490
3fcaf2e5 2491 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2492 bp->attn_state |= asserted;
3fcaf2e5 2493 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2494
2495 if (asserted & ATTN_HARD_WIRED_MASK) {
2496 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2497
a5e9a7cf
EG
2498 bnx2x_acquire_phy_lock(bp);
2499
877e9aa4 2500 /* save nig interrupt mask */
87942b46 2501 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2502 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2503
c18487ee 2504 bnx2x_link_attn(bp);
a2fbb9ea
ET
2505
2506 /* handle unicore attn? */
2507 }
2508 if (asserted & ATTN_SW_TIMER_4_FUNC)
2509 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2510
2511 if (asserted & GPIO_2_FUNC)
2512 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2513
2514 if (asserted & GPIO_3_FUNC)
2515 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2516
2517 if (asserted & GPIO_4_FUNC)
2518 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2519
2520 if (port == 0) {
2521 if (asserted & ATTN_GENERAL_ATTN_1) {
2522 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2523 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2524 }
2525 if (asserted & ATTN_GENERAL_ATTN_2) {
2526 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2527 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2528 }
2529 if (asserted & ATTN_GENERAL_ATTN_3) {
2530 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2531 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2532 }
2533 } else {
2534 if (asserted & ATTN_GENERAL_ATTN_4) {
2535 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2536 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2537 }
2538 if (asserted & ATTN_GENERAL_ATTN_5) {
2539 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2540 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2541 }
2542 if (asserted & ATTN_GENERAL_ATTN_6) {
2543 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2544 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2545 }
2546 }
2547
2548 } /* if hardwired */
2549
5c862848
EG
2550 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2551 asserted, hc_addr);
2552 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2553
2554 /* now set back the mask */
a5e9a7cf 2555 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2556 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2557 bnx2x_release_phy_lock(bp);
2558 }
a2fbb9ea
ET
2559}
2560
877e9aa4 2561static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2562{
34f80b04 2563 int port = BP_PORT(bp);
877e9aa4
ET
2564 int reg_offset;
2565 u32 val;
2566
34f80b04
EG
2567 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2568 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2569
34f80b04 2570 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2571
2572 val = REG_RD(bp, reg_offset);
2573 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2574 REG_WR(bp, reg_offset, val);
2575
2576 BNX2X_ERR("SPIO5 hw attention\n");
2577
35b19ba5
EG
2578 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2579 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
877e9aa4
ET
2580 /* Fan failure attention */
2581
17de50b7 2582 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2583 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2584 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2585 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2586 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2587 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2588 /* mark the failure */
c18487ee 2589 bp->link_params.ext_phy_config &=
877e9aa4 2590 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2591 bp->link_params.ext_phy_config |=
877e9aa4
ET
2592 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2593 SHMEM_WR(bp,
2594 dev_info.port_hw_config[port].
2595 external_phy_config,
c18487ee 2596 bp->link_params.ext_phy_config);
877e9aa4
ET
2597 /* log the failure */
2598 printk(KERN_ERR PFX "Fan Failure on Network"
2599 " Controller %s has caused the driver to"
2600 " shutdown the card to prevent permanent"
2601 " damage. Please contact Dell Support for"
2602 " assistance\n", bp->dev->name);
2603 break;
2604
2605 default:
2606 break;
2607 }
2608 }
34f80b04 2609
589abe3a
EG
2610 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2611 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2612 bnx2x_acquire_phy_lock(bp);
2613 bnx2x_handle_module_detect_int(&bp->link_params);
2614 bnx2x_release_phy_lock(bp);
2615 }
2616
34f80b04
EG
2617 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2618
2619 val = REG_RD(bp, reg_offset);
2620 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2621 REG_WR(bp, reg_offset, val);
2622
2623 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2624 (attn & HW_INTERRUT_ASSERT_SET_0));
2625 bnx2x_panic();
2626 }
877e9aa4
ET
2627}
2628
2629static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2630{
2631 u32 val;
2632
2633 if (attn & BNX2X_DOORQ_ASSERT) {
2634
2635 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2636 BNX2X_ERR("DB hw attention 0x%x\n", val);
2637 /* DORQ discard attention */
2638 if (val & 0x2)
2639 BNX2X_ERR("FATAL error from DORQ\n");
2640 }
34f80b04
EG
2641
2642 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2643
2644 int port = BP_PORT(bp);
2645 int reg_offset;
2646
2647 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2648 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2649
2650 val = REG_RD(bp, reg_offset);
2651 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2652 REG_WR(bp, reg_offset, val);
2653
2654 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2655 (attn & HW_INTERRUT_ASSERT_SET_1));
2656 bnx2x_panic();
2657 }
877e9aa4
ET
2658}
2659
2660static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2661{
2662 u32 val;
2663
2664 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2665
2666 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2667 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2668 /* CFC error attention */
2669 if (val & 0x2)
2670 BNX2X_ERR("FATAL error from CFC\n");
2671 }
2672
2673 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2674
2675 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2676 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2677 /* RQ_USDMDP_FIFO_OVERFLOW */
2678 if (val & 0x18000)
2679 BNX2X_ERR("FATAL error from PXP\n");
2680 }
34f80b04
EG
2681
2682 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2683
2684 int port = BP_PORT(bp);
2685 int reg_offset;
2686
2687 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2688 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2689
2690 val = REG_RD(bp, reg_offset);
2691 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2692 REG_WR(bp, reg_offset, val);
2693
2694 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2695 (attn & HW_INTERRUT_ASSERT_SET_2));
2696 bnx2x_panic();
2697 }
877e9aa4
ET
2698}
2699
2700static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2701{
34f80b04
EG
2702 u32 val;
2703
877e9aa4
ET
2704 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2705
34f80b04
EG
2706 if (attn & BNX2X_PMF_LINK_ASSERT) {
2707 int func = BP_FUNC(bp);
2708
2709 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2710 bnx2x__link_status_update(bp);
2711 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2712 DRV_STATUS_PMF)
2713 bnx2x_pmf_update(bp);
2714
2715 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2716
2717 BNX2X_ERR("MC assert!\n");
2718 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2719 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2720 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2721 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2722 bnx2x_panic();
2723
2724 } else if (attn & BNX2X_MCP_ASSERT) {
2725
2726 BNX2X_ERR("MCP assert!\n");
2727 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2728 bnx2x_fw_dump(bp);
877e9aa4
ET
2729
2730 } else
2731 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2732 }
2733
2734 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2735 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2736 if (attn & BNX2X_GRC_TIMEOUT) {
2737 val = CHIP_IS_E1H(bp) ?
2738 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2739 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2740 }
2741 if (attn & BNX2X_GRC_RSV) {
2742 val = CHIP_IS_E1H(bp) ?
2743 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2744 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2745 }
877e9aa4 2746 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2747 }
2748}
2749
2750static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2751{
a2fbb9ea
ET
2752 struct attn_route attn;
2753 struct attn_route group_mask;
34f80b04 2754 int port = BP_PORT(bp);
877e9aa4 2755 int index;
a2fbb9ea
ET
2756 u32 reg_addr;
2757 u32 val;
3fcaf2e5 2758 u32 aeu_mask;
a2fbb9ea
ET
2759
2760 /* need to take HW lock because MCP or other port might also
2761 try to handle this event */
4a37fb66 2762 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2763
2764 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2765 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2766 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2767 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2768 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2769 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2770
2771 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2772 if (deasserted & (1 << index)) {
2773 group_mask = bp->attn_group[index];
2774
34f80b04
EG
2775 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2776 index, group_mask.sig[0], group_mask.sig[1],
2777 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2778
877e9aa4
ET
2779 bnx2x_attn_int_deasserted3(bp,
2780 attn.sig[3] & group_mask.sig[3]);
2781 bnx2x_attn_int_deasserted1(bp,
2782 attn.sig[1] & group_mask.sig[1]);
2783 bnx2x_attn_int_deasserted2(bp,
2784 attn.sig[2] & group_mask.sig[2]);
2785 bnx2x_attn_int_deasserted0(bp,
2786 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2787
a2fbb9ea
ET
2788 if ((attn.sig[0] & group_mask.sig[0] &
2789 HW_PRTY_ASSERT_SET_0) ||
2790 (attn.sig[1] & group_mask.sig[1] &
2791 HW_PRTY_ASSERT_SET_1) ||
2792 (attn.sig[2] & group_mask.sig[2] &
2793 HW_PRTY_ASSERT_SET_2))
6378c025 2794 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2795 }
2796 }
2797
4a37fb66 2798 bnx2x_release_alr(bp);
a2fbb9ea 2799
5c862848 2800 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2801
2802 val = ~deasserted;
3fcaf2e5
EG
2803 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2804 val, reg_addr);
5c862848 2805 REG_WR(bp, reg_addr, val);
a2fbb9ea 2806
a2fbb9ea 2807 if (~bp->attn_state & deasserted)
3fcaf2e5 2808 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2809
2810 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2811 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2812
3fcaf2e5
EG
2813 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2814 aeu_mask = REG_RD(bp, reg_addr);
2815
2816 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2817 aeu_mask, deasserted);
2818 aeu_mask |= (deasserted & 0xff);
2819 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2820
3fcaf2e5
EG
2821 REG_WR(bp, reg_addr, aeu_mask);
2822 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2823
2824 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2825 bp->attn_state &= ~deasserted;
2826 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2827}
2828
2829static void bnx2x_attn_int(struct bnx2x *bp)
2830{
2831 /* read local copy of bits */
68d59484
EG
2832 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2833 attn_bits);
2834 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2835 attn_bits_ack);
a2fbb9ea
ET
2836 u32 attn_state = bp->attn_state;
2837
2838 /* look for changed bits */
2839 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2840 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2841
2842 DP(NETIF_MSG_HW,
2843 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2844 attn_bits, attn_ack, asserted, deasserted);
2845
2846 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2847 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2848
2849 /* handle bits that were raised */
2850 if (asserted)
2851 bnx2x_attn_int_asserted(bp, asserted);
2852
2853 if (deasserted)
2854 bnx2x_attn_int_deasserted(bp, deasserted);
2855}
2856
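/* A single-bit walk through the edge detection in bnx2x_attn_int()
 * above: a bit is newly asserted when attn_bits shows it set while
 * attn_ack and attn_state show it clear, and deasserted when attn_bits
 * shows it clear while attn_ack and attn_state still show it set.
 * Bits where attn_bits and attn_ack agree but differ from the driver's
 * attn_state trip the "BAD attention state" check.
 */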
2857static void bnx2x_sp_task(struct work_struct *work)
2858{
1cf167f2 2859 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2860 u16 status;
2861
34f80b04 2862
a2fbb9ea
ET
2863 /* Return here if interrupt is disabled */
2864 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2865 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2866 return;
2867 }
2868
2869 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2870/* if (status == 0) */
2871/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2872
3196a88a 2873 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2874
877e9aa4
ET
2875 /* HW attentions */
2876 if (status & 0x1)
a2fbb9ea 2877 bnx2x_attn_int(bp);
a2fbb9ea 2878
68d59484 2879 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2880 IGU_INT_NOP, 1);
2881 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2882 IGU_INT_NOP, 1);
2883 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2884 IGU_INT_NOP, 1);
2885 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2886 IGU_INT_NOP, 1);
2887 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2888 IGU_INT_ENABLE, 1);
877e9aa4 2889
a2fbb9ea
ET
2890}
2891
2892static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2893{
2894 struct net_device *dev = dev_instance;
2895 struct bnx2x *bp = netdev_priv(dev);
2896
2897 /* Return here if interrupt is disabled */
2898 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2899 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2900 return IRQ_HANDLED;
2901 }
2902
8d9c5f34 2903 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2904
2905#ifdef BNX2X_STOP_ON_ERROR
2906 if (unlikely(bp->panic))
2907 return IRQ_HANDLED;
2908#endif
2909
1cf167f2 2910 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2911
2912 return IRQ_HANDLED;
2913}
2914
2915/* end of slow path */
2916
2917/* Statistics */
2918
2919/****************************************************************************
2920* Macros
2921****************************************************************************/
2922
a2fbb9ea
ET
2923/* sum[hi:lo] += add[hi:lo] */
2924#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2925 do { \
2926 s_lo += a_lo; \
f5ba6772 2927 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2928 } while (0)
2929
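/* Carry example for ADD_64 above: with s_lo == 0xffffffff and
 * a_lo == 1, the u32 addition wraps s_lo to 0; the (s_lo < a_lo) test
 * then sees 0 < 1 and propagates a carry of 1 into s_hi.
 */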
2930/* difference = minuend - subtrahend */
2931#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2932 do { \
bb2a0f7a
YG
2933 if (m_lo < s_lo) { \
2934 /* underflow */ \
a2fbb9ea 2935 d_hi = m_hi - s_hi; \
bb2a0f7a 2936 if (d_hi > 0) { \
6378c025 2937 /* we can borrow 1 */ \
a2fbb9ea
ET
2938 d_hi--; \
2939 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2940 } else { \
6378c025 2941 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2942 d_hi = 0; \
2943 d_lo = 0; \
2944 } \
bb2a0f7a
YG
2945 } else { \
2946 /* m_lo >= s_lo */ \
a2fbb9ea 2947 if (m_hi < s_hi) { \
bb2a0f7a
YG
2948 d_hi = 0; \
2949 d_lo = 0; \
2950 } else { \
6378c025 2951 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2952 d_hi = m_hi - s_hi; \
2953 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2954 } \
2955 } \
2956 } while (0)
2957
bb2a0f7a 2958#define UPDATE_STAT64(s, t) \
a2fbb9ea 2959 do { \
bb2a0f7a
YG
2960 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2961 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2962 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2963 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2964 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2965 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2966 } while (0)
2967
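/* Expansion sketch for UPDATE_STAT64 above: a use such as
 *
 *	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
 *
 * token-pastes into a DIFF_64 of new->rx_stat_grerb_{hi,lo} against the
 * raw snapshot in pstats->mac_stx[0].rx_stat_ifhcinbadoctets_{hi,lo},
 * refreshes that snapshot, and ADD_64s the delta into the accumulated
 * counters in pstats->mac_stx[1].
 */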
bb2a0f7a 2968#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2969 do { \
bb2a0f7a
YG
2970 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2971 diff.lo, new->s##_lo, old->s##_lo); \
2972 ADD_64(estats->t##_hi, diff.hi, \
2973 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2974 } while (0)
2975
2976/* sum[hi:lo] += add */
2977#define ADD_EXTEND_64(s_hi, s_lo, a) \
2978 do { \
2979 s_lo += a; \
2980 s_hi += (s_lo < a) ? 1 : 0; \
2981 } while (0)
2982
bb2a0f7a 2983#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2984 do { \
bb2a0f7a
YG
2985 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2986 pstats->mac_stx[1].s##_lo, \
2987 new->s); \
a2fbb9ea
ET
2988 } while (0)
2989
bb2a0f7a 2990#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2991 do { \
2992 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2993 old_tclient->s = le32_to_cpu(tclient->s); \
de832a55
EG
2994 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2995 } while (0)
2996
2997#define UPDATE_EXTEND_USTAT(s, t) \
2998 do { \
2999 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3000 old_uclient->s = uclient->s; \
3001 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3002 } while (0)
3003
3004#define UPDATE_EXTEND_XSTAT(s, t) \
3005 do { \
3006 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3007 old_xclient->s = le32_to_cpu(xclient->s); \
de832a55
EG
3008 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3009 } while (0)
3010
3011/* minuend -= subtrahend */
3012#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3013 do { \
3014 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3015 } while (0)
3016
3017/* minuend[hi:lo] -= subtrahend */
3018#define SUB_EXTEND_64(m_hi, m_lo, s) \
3019 do { \
3020 SUB_64(m_hi, 0, m_lo, s); \
3021 } while (0)
3022
3023#define SUB_EXTEND_USTAT(s, t) \
3024 do { \
3025 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3026 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3027 } while (0)
3028
3029/*
3030 * General service functions
3031 */
3032
3033static inline long bnx2x_hilo(u32 *hiref)
3034{
3035 u32 lo = *(hiref + 1);
3036#if (BITS_PER_LONG == 64)
3037 u32 hi = *hiref;
3038
3039 return HILO_U64(hi, lo);
3040#else
3041 return lo;
3042#endif
3043}
3044
3045/*
3046 * Init service functions
3047 */
3048
bb2a0f7a
YG
3049static void bnx2x_storm_stats_post(struct bnx2x *bp)
3050{
3051 if (!bp->stats_pending) {
3052 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3053 int i, rc;
bb2a0f7a
YG
3054
3055 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3056 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3057 for_each_queue(bp, i)
3058 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3059
3060 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3061 ((u32 *)&ramrod_data)[1],
3062 ((u32 *)&ramrod_data)[0], 0);
3063 if (rc == 0) {
3064 /* stats ramrod has its own slot on the spq */
3065 bp->spq_left++;
3066 bp->stats_pending = 1;
3067 }
3068 }
3069}
3070
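/* Note on the post above: the 8-byte eth_query_ramrod_data is handed
 * over as the data_hi/data_lo words of the slow path element, and the
 * spq_left++ compensates for bnx2x_sp_post()'s decrement, since the
 * statistics ramrod has a dedicated slot on the queue.
 */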
3071static void bnx2x_stats_init(struct bnx2x *bp)
3072{
3073 int port = BP_PORT(bp);
de832a55 3074 int i;
bb2a0f7a 3075
de832a55 3076 bp->stats_pending = 0;
bb2a0f7a
YG
3077 bp->executer_idx = 0;
3078 bp->stats_counter = 0;
3079
3080 /* port stats */
3081 if (!BP_NOMCP(bp))
3082 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3083 else
3084 bp->port.port_stx = 0;
3085 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3086
3087 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3088 bp->port.old_nig_stats.brb_discard =
3089 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3090 bp->port.old_nig_stats.brb_truncate =
3091 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3092 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3093 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3094 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3095 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3096
3097 /* function stats */
de832a55
EG
3098 for_each_queue(bp, i) {
3099 struct bnx2x_fastpath *fp = &bp->fp[i];
3100
3101 memset(&fp->old_tclient, 0,
3102 sizeof(struct tstorm_per_client_stats));
3103 memset(&fp->old_uclient, 0,
3104 sizeof(struct ustorm_per_client_stats));
3105 memset(&fp->old_xclient, 0,
3106 sizeof(struct xstorm_per_client_stats));
3107 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3108 }
3109
bb2a0f7a 3110 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3111 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3112
3113 bp->stats_state = STATS_STATE_DISABLED;
3114 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3115 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3116}
3117
3118static void bnx2x_hw_stats_post(struct bnx2x *bp)
3119{
3120 struct dmae_command *dmae = &bp->stats_dmae;
3121 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3122
3123 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3124 if (CHIP_REV_IS_SLOW(bp))
3125 return;
bb2a0f7a
YG
3126
3127 /* loader */
3128 if (bp->executer_idx) {
3129 int loader_idx = PMF_DMAE_C(bp);
3130
3131 memset(dmae, 0, sizeof(struct dmae_command));
3132
3133 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3134 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3135 DMAE_CMD_DST_RESET |
3136#ifdef __BIG_ENDIAN
3137 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3138#else
3139 DMAE_CMD_ENDIANITY_DW_SWAP |
3140#endif
3141 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3142 DMAE_CMD_PORT_0) |
3143 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3144 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3145 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3146 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3147 sizeof(struct dmae_command) *
3148 (loader_idx + 1)) >> 2;
3149 dmae->dst_addr_hi = 0;
3150 dmae->len = sizeof(struct dmae_command) >> 2;
3151 if (CHIP_IS_E1(bp))
3152 dmae->len--;
3153 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3154 dmae->comp_addr_hi = 0;
3155 dmae->comp_val = 1;
3156
3157 *stats_comp = 0;
3158 bnx2x_post_dmae(bp, dmae, loader_idx);
3159
3160 } else if (bp->func_stx) {
3161 *stats_comp = 0;
3162 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3163 }
3164}
3165
3166static int bnx2x_stats_comp(struct bnx2x *bp)
3167{
3168 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3169 int cnt = 10;
3170
3171 might_sleep();
3172 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3173 if (!cnt) {
3174 BNX2X_ERR("timeout waiting for stats finished\n");
3175 break;
3176 }
3177 cnt--;
12469401 3178 msleep(1);
bb2a0f7a
YG
3179 }
3180 return 1;
3181}
3182
3183/*
3184 * Statistics service functions
3185 */
3186
3187static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3188{
3189 struct dmae_command *dmae;
3190 u32 opcode;
3191 int loader_idx = PMF_DMAE_C(bp);
3192 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3193
3194 /* sanity */
3195 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3196 BNX2X_ERR("BUG!\n");
3197 return;
3198 }
3199
3200 bp->executer_idx = 0;
3201
3202 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3203 DMAE_CMD_C_ENABLE |
3204 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3205#ifdef __BIG_ENDIAN
3206 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3207#else
3208 DMAE_CMD_ENDIANITY_DW_SWAP |
3209#endif
3210 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3211 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3212
3213 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3214 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3215 dmae->src_addr_lo = bp->port.port_stx >> 2;
3216 dmae->src_addr_hi = 0;
3217 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3218 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3219 dmae->len = DMAE_LEN32_RD_MAX;
3220 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3221 dmae->comp_addr_hi = 0;
3222 dmae->comp_val = 1;
3223
3224 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3225 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3226 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3227 dmae->src_addr_hi = 0;
7a9b2557
VZ
3228 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3229 DMAE_LEN32_RD_MAX * 4);
3230 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3231 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3232 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3233 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3234 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3235 dmae->comp_val = DMAE_COMP_VAL;
3236
3237 *stats_comp = 0;
3238 bnx2x_hw_stats_post(bp);
3239 bnx2x_stats_comp(bp);
3240}
3241
3242static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3243{
3244 struct dmae_command *dmae;
34f80b04 3245 int port = BP_PORT(bp);
bb2a0f7a 3246 int vn = BP_E1HVN(bp);
a2fbb9ea 3247 u32 opcode;
bb2a0f7a 3248 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3249 u32 mac_addr;
bb2a0f7a
YG
3250 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3251
3252 /* sanity */
3253 if (!bp->link_vars.link_up || !bp->port.pmf) {
3254 BNX2X_ERR("BUG!\n");
3255 return;
3256 }
a2fbb9ea
ET
3257
3258 bp->executer_idx = 0;
bb2a0f7a
YG
3259
3260 /* MCP */
3261 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3262 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3263 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3264#ifdef __BIG_ENDIAN
bb2a0f7a 3265 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3266#else
bb2a0f7a 3267 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3268#endif
bb2a0f7a
YG
3269 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3270 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3271
bb2a0f7a 3272 if (bp->port.port_stx) {
a2fbb9ea
ET
3273
3274 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3275 dmae->opcode = opcode;
bb2a0f7a
YG
3276 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3277 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3278 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3279 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3280 dmae->len = sizeof(struct host_port_stats) >> 2;
3281 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3282 dmae->comp_addr_hi = 0;
3283 dmae->comp_val = 1;
a2fbb9ea
ET
3284 }
3285
bb2a0f7a
YG
3286 if (bp->func_stx) {
3287
3288 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3289 dmae->opcode = opcode;
3290 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3291 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3292 dmae->dst_addr_lo = bp->func_stx >> 2;
3293 dmae->dst_addr_hi = 0;
3294 dmae->len = sizeof(struct host_func_stats) >> 2;
3295 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3296 dmae->comp_addr_hi = 0;
3297 dmae->comp_val = 1;
a2fbb9ea
ET
3298 }
3299
bb2a0f7a 3300 /* MAC */
a2fbb9ea
ET
3301 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3302 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3303 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3304#ifdef __BIG_ENDIAN
3305 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3306#else
3307 DMAE_CMD_ENDIANITY_DW_SWAP |
3308#endif
bb2a0f7a
YG
3309 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3311
c18487ee 3312 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3313
3314 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3315 NIG_REG_INGRESS_BMAC0_MEM);
3316
3317 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3318 BIGMAC_REGISTER_TX_STAT_GTBYT */
3319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320 dmae->opcode = opcode;
3321 dmae->src_addr_lo = (mac_addr +
3322 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3323 dmae->src_addr_hi = 0;
3324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3325 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3326 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3327 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3328 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3329 dmae->comp_addr_hi = 0;
3330 dmae->comp_val = 1;
3331
3332 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3333 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3334 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3335 dmae->opcode = opcode;
3336 dmae->src_addr_lo = (mac_addr +
3337 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3338 dmae->src_addr_hi = 0;
3339 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3340 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3341 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3342 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3343 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3344 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3345 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

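/* Editor's note on the function below: it prepares the single DMAE
 * command that copies the driver's host_func_stats block out to the
 * per-function statistics location (func_stx) in device memory;
 * completion is reported through the stats_comp word that
 * bnx2x_stats_comp() polls.
 */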
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

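/* Editor's note: (re)arm statistics gathering. The PMF programs the
 * full port DMAE sequence, a non-PMF function programs only its own
 * function stats, and then both the hardware DMAE transfer and the
 * storm (firmware) statistics query are posted.
 */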
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

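/* Editor's note: fold the BigMAC counters that the DMAE engine just
 * deposited in mac_stats into the 64-bit port statistics kept in
 * mac_stx[1]. UPDATE_STAT64() extends each wrapping hardware counter
 * into its 64-bit hi/lo software counter (the 'diff' regpair below
 * serves as its scratch space); the pause-frame totals are then
 * derived from the xoff counters.
 */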
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

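/* Editor's note: the EMAC analogue of the above. UPDATE_EXTEND_STAT()
 * extends each EMAC hardware counter into its 64-bit software
 * counter, and the pause-frame totals are the sums of the xon and
 * xoff counters.
 */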
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

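/* Editor's note: refresh the port statistics after a DMAE
 * completion. Dispatch to the active MAC's update routine, fold the
 * NIG discard/truncate deltas, snapshot the NIG block for the next
 * delta, mirror mac_stx[1] into eth_stats and advance the
 * host_port_stats start/end sequence numbers.
 */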
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

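/* Editor's note: harvest the per-client statistics produced by the
 * x/t/u storm firmware. Each client's stats_counter must be exactly
 * one behind bp->stats_counter, otherwise the snapshot is stale and
 * a non-zero value is returned; otherwise the per-queue counters are
 * accumulated into the function-wide (fstats) and device-wide
 * (estats) totals.
 */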
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

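/* Editor's note: translate the accumulated driver statistics into
 * the generic struct net_device_stats counters reported to the
 * network stack.
 */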
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

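/* Editor's note: UPDATE event handler, run from the periodic timer.
 * It returns early if the DMAE completion value is not in place yet;
 * a PMF also refreshes the hardware (port) statistics. If the storm
 * statistics stay stale for several consecutive updates the driver
 * panics. Finally the netdev/driver stats are recomputed, an
 * optional debug dump is printed, and the next DMAE/storm queries
 * are posted.
 */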
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

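/* Editor's note: build the final DMAE commands that write the host
 * port statistics (and, if present, the function statistics) back to
 * their locations in device memory before gathering is disabled.
 */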
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

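/* Editor's note: STOP event handler. Complete any DMAE in flight,
 * harvest one last hardware/storm update and, for the PMF, flush the
 * port statistics back to device memory.
 */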
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

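/* Editor's note: the statistics state machine. The table is indexed
 * by [current state][event]; each entry names the action to run and
 * the state to move to. The two states are DISABLED and ENABLED; the
 * four events are PMF change, link-up, the periodic update, and stop.
 */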
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

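/* Editor's note: the periodic driver timer (re-armed with
 * bp->current_interval). It services poll mode, maintains the
 * driver/MCP heartbeat pulse, and kicks the statistics state machine
 * with an UPDATE event while the device is open.
 */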
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

4176/* end of Statistics */
4177
4178/* nic init */
4179
4180/*
4181 * nic init service functions
4182 */
4183
34f80b04 4184static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4185{
34f80b04
EG
4186 int port = BP_PORT(bp);
4187
4188 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4189 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4190 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4191 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4192 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4193 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4194}
4195
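/* Editor's note: point the USTORM and CSTORM halves of a fastpath
 * status block at its host buffer, mark every host-coalescing index
 * disabled for now (bnx2x_update_coalesce() re-enables the ones in
 * use), and enable the block's IGU interrupt line.
 */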
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

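/* Editor's note: set up the default status block. It caches the
 * per-group AEU attention signals (four registers per group),
 * programs the attention message address and number for this port,
 * then registers each storm's section of the block and enables its
 * IGU interrupt.
 */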
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

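/* Editor's note: program the host-coalescing timeout for the Rx and
 * Tx CQ indices of every fastpath status block. The tick values are
 * scaled down by 12 to the HC's internal time units (presumably
 * 12-usec granularity), and a tick value of 0 leaves coalescing
 * disabled for that index instead.
 */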
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

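/* Editor's note: bring up all Rx rings. It pre-allocates the TPA skb
 * pool per queue (falling back to TPA-disabled on allocation
 * failure), chains the "next page" elements of the SGE, BD and CQE
 * rings, fills the rings with buffers, and publishes the initial
 * producers to the chip.
 */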
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

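/* Editor's note: fill the per-connection ETH context for every
 * queue. The USTORM side gets the Rx BD/SGE page addresses, buffer
 * sizes and flags, the XSTORM side gets the Tx BD page and doorbell
 * data addresses, and both AG contexts get their CDU validation
 * words.
 */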
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = FP_SB_ID(fp);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

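/* Editor's note: populate the RSS indirection table; with RSS
 * disabled the table is left untouched. Entries map hash results to
 * client IDs by simple round-robin over the Rx queues.
 */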
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_rx_queues));
}

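/* Editor's note: write the per-client TSTORM configuration - MTU,
 * statistics and E1H outer-VLAN removal flags, optional HW VLAN
 * stripping, and the SGE limits used when TPA is enabled.
 */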
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

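/* Editor's note: translate bp->rx_mode into the TSTORM MAC filtering
 * masks (drop-all/accept-all per unicast, multicast and broadcast)
 * and write them to internal memory; any mode other than "none" also
 * refreshes the client configuration.
 */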
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

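/* Editor's note: per-function internal memory init - RSS/common
 * TSTORM configuration, reset of the per-client storm statistics,
 * statistics query addresses, E1H multi-function mode flags, CQ
 * page/aggregation-size programming, dropless flow-control
 * thresholds, and the rate-shaping/fairness setup for multi-function
 * mode.
 */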
471de716 4871static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4872{
a2fbb9ea
ET
4873 struct tstorm_eth_function_common_config tstorm_config = {0};
4874 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4875 int port = BP_PORT(bp);
4876 int func = BP_FUNC(bp);
de832a55
EG
4877 int i, j;
4878 u32 offset;
471de716 4879 u16 max_agg_size;
a2fbb9ea
ET
4880
4881 if (is_multi(bp)) {
555f6c78 4882 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4883 tstorm_config.rss_result_mask = MULTI_MASK;
4884 }
8d9c5f34
EG
4885 if (IS_E1HMF(bp))
4886 tstorm_config.config_flags |=
4887 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4888
34f80b04
EG
4889 tstorm_config.leading_client_id = BP_L_ID(bp);
4890
a2fbb9ea 4891 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4892 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4893 (*(u32 *)&tstorm_config));
4894
c14423fe 4895 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4896 bnx2x_set_storm_rx_mode(bp);
4897
de832a55
EG
4898 for_each_queue(bp, i) {
4899 u8 cl_id = bp->fp[i].cl_id;
4900
4901 /* reset xstorm per client statistics */
4902 offset = BAR_XSTRORM_INTMEM +
4903 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4904 for (j = 0;
4905 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4906 REG_WR(bp, offset + j*4, 0);
4907
4908 /* reset tstorm per client statistics */
4909 offset = BAR_TSTRORM_INTMEM +
4910 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4911 for (j = 0;
4912 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4913 REG_WR(bp, offset + j*4, 0);
4914
4915 /* reset ustorm per client statistics */
4916 offset = BAR_USTRORM_INTMEM +
4917 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4918 for (j = 0;
4919 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4920 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4921 }
4922
4923 /* Init statistics related context */
34f80b04 4924 stats_flags.collect_eth = 1;
a2fbb9ea 4925
66e855f3 4926 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4927 ((u32 *)&stats_flags)[0]);
66e855f3 4928 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4929 ((u32 *)&stats_flags)[1]);
4930
66e855f3 4931 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4932 ((u32 *)&stats_flags)[0]);
66e855f3 4933 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4934 ((u32 *)&stats_flags)[1]);
4935
de832a55
EG
4936 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4937 ((u32 *)&stats_flags)[0]);
4938 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4939 ((u32 *)&stats_flags)[1]);
4940
66e855f3 4941 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4942 ((u32 *)&stats_flags)[0]);
66e855f3 4943 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4944 ((u32 *)&stats_flags)[1]);
4945
4946 REG_WR(bp, BAR_XSTRORM_INTMEM +
4947 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4948 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4949 REG_WR(bp, BAR_XSTRORM_INTMEM +
4950 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4951 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4952
4953 REG_WR(bp, BAR_TSTRORM_INTMEM +
4954 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4955 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4956 REG_WR(bp, BAR_TSTRORM_INTMEM +
4957 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4958 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4959
4960 REG_WR(bp, BAR_USTRORM_INTMEM +
4961 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4962 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4963 REG_WR(bp, BAR_USTRORM_INTMEM +
4964 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4965 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4966
4967 if (CHIP_IS_E1H(bp)) {
4968 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4969 IS_E1HMF(bp));
4970 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4971 IS_E1HMF(bp));
4972 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4973 IS_E1HMF(bp));
4974 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4975 IS_E1HMF(bp));
4976
4977 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4978 bp->e1hov);
4979 }
4980
4981 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4982 max_agg_size =
4983 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4984 SGE_PAGE_SIZE * PAGES_PER_SGE),
4985 (u32)0xffff);
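/* worked example, assuming 4K SGE pages and PAGES_PER_SGE == 2:
 * min(8, MAX_SKB_FRAGS) * 4096 * 2 = 0x10000, which the outer min()
 * clamps to the u16 limit 0xffff before it is programmed below */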
555f6c78 4986 for_each_rx_queue(bp, i) {
7a9b2557 4987 struct bnx2x_fastpath *fp = &bp->fp[i];
4988
4989 REG_WR(bp, BAR_USTRORM_INTMEM +
4990 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4991 U64_LO(fp->rx_comp_mapping));
4992 REG_WR(bp, BAR_USTRORM_INTMEM +
4993 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4994 U64_HI(fp->rx_comp_mapping));
4995
4996 REG_WR16(bp, BAR_USTRORM_INTMEM +
4997 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4998 max_agg_size);
4999 }
8a1c38d1 5000
5001 /* dropless flow control */
5002 if (CHIP_IS_E1H(bp)) {
5003 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5004
5005 rx_pause.bd_thr_low = 250;
5006 rx_pause.cqe_thr_low = 250;
5007 rx_pause.cos = 1;
5008 rx_pause.sge_thr_low = 0;
5009 rx_pause.bd_thr_high = 350;
5010 rx_pause.cqe_thr_high = 350;
5011 rx_pause.sge_thr_high = 0;
5012
5013 for_each_rx_queue(bp, i) {
5014 struct bnx2x_fastpath *fp = &bp->fp[i];
5015
5016 if (!fp->disable_tpa) {
5017 rx_pause.sge_thr_low = 150;
5018 rx_pause.sge_thr_high = 250;
5019 }
5020
5021
5022 offset = BAR_USTRORM_INTMEM +
5023 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5024 fp->cl_id);
5025 for (j = 0;
5026 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5027 j++)
5028 REG_WR(bp, offset + j*4,
5029 ((u32 *)&rx_pause)[j]);
5030 }
5031 }
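/* assumed semantics of the pause block above: the ustorm asserts
 * pause when free BD/CQE (and, with TPA enabled, SGE) entries drop
 * below the *_thr_low marks and releases it again above *_thr_high */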
5032
5033 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5034
5035 /* Init rate shaping and fairness contexts */
5036 if (IS_E1HMF(bp)) {
5037 int vn;
5038
 5039 /* During init there is no active link;
 5040 until link is up, set the link rate to 10Gbps */
5041 bp->link_vars.line_speed = SPEED_10000;
5042 bnx2x_init_port_minmax(bp);
5043
5044 bnx2x_calc_vn_weight_sum(bp);
5045
5046 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5047 bnx2x_init_vn_minmax(bp, 2*vn + port);
5048
5049 /* Enable rate shaping and fairness */
5050 bp->cmng.flags.cmng_enables =
5051 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5052 if (bp->vn_weight_sum)
5053 bp->cmng.flags.cmng_enables |=
5054 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5055 else
 5056 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
 5057 " fairness will be disabled\n");
5058 } else {
5059 /* rate shaping and fairness are disabled */
5060 DP(NETIF_MSG_IFUP,
5061 "single function mode minmax will be disabled\n");
5062 }
5063
5064
5065 /* Store it to internal memory */
5066 if (bp->port.pmf)
5067 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5068 REG_WR(bp, BAR_XSTRORM_INTMEM +
5069 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5070 ((u32 *)(&bp->cmng))[i]);
5071}
5072
5073static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5074{
5075 switch (load_code) {
5076 case FW_MSG_CODE_DRV_LOAD_COMMON:
5077 bnx2x_init_internal_common(bp);
5078 /* no break */
5079
5080 case FW_MSG_CODE_DRV_LOAD_PORT:
5081 bnx2x_init_internal_port(bp);
5082 /* no break */
5083
5084 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5085 bnx2x_init_internal_func(bp);
5086 break;
5087
5088 default:
5089 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5090 break;
5091 }
5092}
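/* the omitted breaks above are deliberate fall-through: a COMMON load
 * also runs the PORT and FUNCTION init stages, and a PORT load also
 * runs the FUNCTION stage */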
5093
5094static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5095{
5096 int i;
5097
5098 for_each_queue(bp, i) {
5099 struct bnx2x_fastpath *fp = &bp->fp[i];
5100
34f80b04 5101 fp->bp = bp;
a2fbb9ea 5102 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5103 fp->index = i;
5104 fp->cl_id = BP_L_ID(bp) + i;
5105 fp->sb_id = fp->cl_id;
5106 DP(NETIF_MSG_IFUP,
5107 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5108 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5109 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5110 FP_SB_ID(fp));
5111 bnx2x_update_fpsb_idx(fp);
5112 }
5113
5114 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5115 DEF_SB_ID);
5116 bnx2x_update_dsb_idx(bp);
5117 bnx2x_update_coalesce(bp);
5118 bnx2x_init_rx_rings(bp);
5119 bnx2x_init_tx_ring(bp);
5120 bnx2x_init_sp_ring(bp);
5121 bnx2x_init_context(bp);
471de716 5122 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5123 bnx2x_init_ind_table(bp);
5124 bnx2x_stats_init(bp);
5125
5126 /* At this point, we are ready for interrupts */
5127 atomic_set(&bp->intr_sem, 0);
5128
5129 /* flush all before enabling interrupts */
5130 mb();
5131 mmiowb();
5132
615f8fd9 5133 bnx2x_int_enable(bp);
5134}
5135
5136/* end of nic init */
5137
5138/*
5139 * gzip service functions
5140 */
5141
5142static int bnx2x_gunzip_init(struct bnx2x *bp)
5143{
5144 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5145 &bp->gunzip_mapping);
5146 if (bp->gunzip_buf == NULL)
5147 goto gunzip_nomem1;
5148
5149 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5150 if (bp->strm == NULL)
5151 goto gunzip_nomem2;
5152
5153 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5154 GFP_KERNEL);
5155 if (bp->strm->workspace == NULL)
5156 goto gunzip_nomem3;
5157
5158 return 0;
5159
5160gunzip_nomem3:
5161 kfree(bp->strm);
5162 bp->strm = NULL;
5163
5164gunzip_nomem2:
5165 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5166 bp->gunzip_mapping);
5167 bp->gunzip_buf = NULL;
5168
5169gunzip_nomem1:
 5170 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5171 " decompression\n", bp->dev->name);
5172 return -ENOMEM;
5173}
5174
5175static void bnx2x_gunzip_end(struct bnx2x *bp)
5176{
5177 kfree(bp->strm->workspace);
5178
5179 kfree(bp->strm);
5180 bp->strm = NULL;
5181
5182 if (bp->gunzip_buf) {
5183 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5184 bp->gunzip_mapping);
5185 bp->gunzip_buf = NULL;
5186 }
5187}
5188
5189static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5190{
5191 int n, rc;
5192
5193 /* check gzip header */
5194 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5195 return -EINVAL;
5196
5197 n = 10;
5198
34f80b04 5199#define FNAME 0x8
5200
5201 if (zbuf[3] & FNAME)
5202 while ((zbuf[n++] != 0) && (n < len));
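/* gzip framing (RFC 1952): bytes 0-1 are the 0x1f 0x8b magic, byte 2
 * the compression method, byte 3 the flags; when FNAME is set, a
 * NUL-terminated file name follows the fixed 10-byte header and is
 * skipped by the loop above */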
5203
5204 bp->strm->next_in = zbuf + n;
5205 bp->strm->avail_in = len - n;
5206 bp->strm->next_out = bp->gunzip_buf;
5207 bp->strm->avail_out = FW_BUF_SIZE;
5208
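/* a negative windowBits value asks zlib for a raw deflate stream,
 * since the gzip wrapper was already parsed by hand above */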
5209 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5210 if (rc != Z_OK)
5211 return rc;
5212
5213 rc = zlib_inflate(bp->strm, Z_FINISH);
5214 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5215 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5216 bp->dev->name, bp->strm->msg);
5217
5218 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5219 if (bp->gunzip_outlen & 0x3)
5220 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5221 " gunzip_outlen (%d) not aligned\n",
5222 bp->dev->name, bp->gunzip_outlen);
5223 bp->gunzip_outlen >>= 2;
5224
5225 zlib_inflateEnd(bp->strm);
5226
5227 if (rc == Z_STREAM_END)
5228 return 0;
5229
5230 return rc;
5231}
5232
5233/* nic load/unload */
5234
5235/*
34f80b04 5236 * General service functions
5237 */
5238
5239/* send a NIG loopback debug packet */
5240static void bnx2x_lb_pckt(struct bnx2x *bp)
5241{
a2fbb9ea 5242 u32 wb_write[3];
5243
5244 /* Ethernet source and destination addresses */
5245 wb_write[0] = 0x55555555;
5246 wb_write[1] = 0x55555555;
34f80b04 5247 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5248 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5249
5250 /* NON-IP protocol */
5251 wb_write[0] = 0x09000000;
5252 wb_write[1] = 0x55555555;
34f80b04 5253 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5254 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5255}
5256
5257/* some of the internal memories
5258 * are not directly readable from the driver;
5259 * to test them we send debug packets
5260 */
5261static int bnx2x_int_mem_test(struct bnx2x *bp)
5262{
5263 int factor;
5264 int count, i;
5265 u32 val = 0;
5266
ad8d3948 5267 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5268 factor = 120;
5269 else if (CHIP_REV_IS_EMUL(bp))
5270 factor = 200;
5271 else
a2fbb9ea 5272 factor = 1;
5273
5274 DP(NETIF_MSG_HW, "start part1\n");
5275
5276 /* Disable inputs of parser neighbor blocks */
5277 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5278 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5279 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5280 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5281
5282 /* Write 0 to parser credits for CFC search request */
5283 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5284
5285 /* send Ethernet packet */
5286 bnx2x_lb_pckt(bp);
5287
 5288 /* TODO: do we need to reset the NIG statistics here? */
5289 /* Wait until NIG register shows 1 packet of size 0x10 */
5290 count = 1000 * factor;
5291 while (count) {
34f80b04 5292
5293 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5294 val = *bnx2x_sp(bp, wb_data[0]);
5295 if (val == 0x10)
5296 break;
5297
5298 msleep(10);
5299 count--;
5300 }
5301 if (val != 0x10) {
5302 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5303 return -1;
5304 }
5305
5306 /* Wait until PRS register shows 1 packet */
5307 count = 1000 * factor;
5308 while (count) {
5309 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5310 if (val == 1)
5311 break;
5312
5313 msleep(10);
5314 count--;
5315 }
5316 if (val != 0x1) {
5317 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5318 return -2;
5319 }
5320
5321 /* Reset and init BRB, PRS */
34f80b04 5322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5323 msleep(50);
34f80b04 5324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5325 msleep(50);
5326 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5327 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5328
5329 DP(NETIF_MSG_HW, "part2\n");
5330
5331 /* Disable inputs of parser neighbor blocks */
5332 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5333 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5334 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5335 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5336
5337 /* Write 0 to parser credits for CFC search request */
5338 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5339
5340 /* send 10 Ethernet packets */
5341 for (i = 0; i < 10; i++)
5342 bnx2x_lb_pckt(bp);
5343
5344 /* Wait until NIG register shows 10 + 1
5345 packets of size 11*0x10 = 0xb0 */
5346 count = 1000 * factor;
5347 while (count) {
34f80b04 5348
5349 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5350 val = *bnx2x_sp(bp, wb_data[0]);
5351 if (val == 0xb0)
5352 break;
5353
5354 msleep(10);
5355 count--;
5356 }
5357 if (val != 0xb0) {
5358 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5359 return -3;
5360 }
5361
5362 /* Wait until PRS register shows 2 packets */
5363 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5364 if (val != 2)
5365 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5366
5367 /* Write 1 to parser credits for CFC search request */
5368 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5369
5370 /* Wait until PRS register shows 3 packets */
5371 msleep(10 * factor);
 5372 /* (the extra CFC search credit lets one more packet through the parser) */
5373 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5374 if (val != 3)
5375 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5376
5377 /* clear NIG EOP FIFO */
5378 for (i = 0; i < 11; i++)
5379 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5380 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5381 if (val != 1) {
5382 BNX2X_ERR("clear of NIG failed\n");
5383 return -4;
5384 }
5385
5386 /* Reset and init BRB, PRS, NIG */
5387 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5388 msleep(50);
5389 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5390 msleep(50);
5391 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5392 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5393#ifndef BCM_ISCSI
5394 /* set NIC mode */
5395 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5396#endif
5397
5398 /* Enable inputs of parser neighbor blocks */
5399 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5400 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5401 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5402 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5403
5404 DP(NETIF_MSG_HW, "done\n");
5405
5406 return 0; /* OK */
5407}
5408
5409static void enable_blocks_attention(struct bnx2x *bp)
5410{
5411 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5412 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5413 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5414 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5415 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5416 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5417 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5418 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5419 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5420/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5421/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5422 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5423 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5424 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5425/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5426/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5427 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5428 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5429 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5430 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5431/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5432/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5433 if (CHIP_REV_IS_FPGA(bp))
5434 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5435 else
5436 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5437 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5438 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5439 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5440/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5441/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5442 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5443 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5444/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5445 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5446}
5447
34f80b04 5448
5449static void bnx2x_reset_common(struct bnx2x *bp)
5450{
5451 /* reset_common */
5452 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5453 0xd3ffff7f);
5454 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5455}
5456
34f80b04 5457static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5458{
a2fbb9ea 5459 u32 val, i;
a2fbb9ea 5460
34f80b04 5461 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5462
81f75bbf 5463 bnx2x_reset_common(bp);
5464 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5465 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5466
5467 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5468 if (CHIP_IS_E1H(bp))
5469 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5470
5471 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5472 msleep(30);
5473 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5474
5475 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5476 if (CHIP_IS_E1(bp)) {
5477 /* enable HW interrupt from PXP on USDM overflow
5478 bit 16 on INT_MASK_0 */
5479 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5480 }
a2fbb9ea 5481
5482 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5483 bnx2x_init_pxp(bp);
5484
5485#ifdef __BIG_ENDIAN
5486 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5487 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5488 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5489 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5490 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5491 /* make sure this value is 0 */
5492 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5493
5494/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5495 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5496 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5497 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5498 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5499#endif
5500
34f80b04 5501 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5502#ifdef BCM_ISCSI
5503 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5504 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5505 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5506#endif
5507
5508 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5509 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5510
 5511 /* let the HW do its magic ... */
5512 msleep(100);
5513 /* finish PXP init */
5514 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5515 if (val != 1) {
5516 BNX2X_ERR("PXP2 CFG failed\n");
5517 return -EBUSY;
5518 }
5519 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5520 if (val != 1) {
5521 BNX2X_ERR("PXP2 RD_INIT failed\n");
5522 return -EBUSY;
5523 }
a2fbb9ea 5524
5525 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5526 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5527
34f80b04 5528 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5529
5530 /* clean the DMAE memory */
5531 bp->dmae_ready = 1;
5532 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5533
5534 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5535 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5536 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5537 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5538
5539 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5540 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5541 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5542 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5543
5544 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5545 /* soft reset pulse */
5546 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5547 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5548
5549#ifdef BCM_ISCSI
34f80b04 5550 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5551#endif
a2fbb9ea 5552
5553 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5554 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5555 if (!CHIP_REV_IS_SLOW(bp)) {
5556 /* enable hw interrupt from doorbell Q */
5557 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5558 }
a2fbb9ea 5559
34f80b04 5560 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5561 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5562 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5563 /* set NIC mode */
5564 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5565 if (CHIP_IS_E1H(bp))
5566 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5567
5568 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5569 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5570 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5571 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5572
5573 if (CHIP_IS_E1H(bp)) {
5574 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5575 STORM_INTMEM_SIZE_E1H/2);
5576 bnx2x_init_fill(bp,
5577 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5578 0, STORM_INTMEM_SIZE_E1H/2);
5579 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5580 STORM_INTMEM_SIZE_E1H/2);
5581 bnx2x_init_fill(bp,
5582 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5583 0, STORM_INTMEM_SIZE_E1H/2);
5584 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5585 STORM_INTMEM_SIZE_E1H/2);
5586 bnx2x_init_fill(bp,
5587 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5588 0, STORM_INTMEM_SIZE_E1H/2);
5589 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5590 STORM_INTMEM_SIZE_E1H/2);
5591 bnx2x_init_fill(bp,
5592 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5593 0, STORM_INTMEM_SIZE_E1H/2);
5594 } else { /* E1 */
5595 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5596 STORM_INTMEM_SIZE_E1);
5597 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5598 STORM_INTMEM_SIZE_E1);
5599 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5600 STORM_INTMEM_SIZE_E1);
5601 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5602 STORM_INTMEM_SIZE_E1);
34f80b04 5603 }
a2fbb9ea 5604
5605 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5606 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5607 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5608 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5609
5610 /* sync semi rtc */
5611 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5612 0x80000000);
5613 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5614 0x80000000);
a2fbb9ea 5615
5616 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5617 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5618 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5619
5620 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5621 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5622 REG_WR(bp, i, 0xc0cac01a);
5623 /* TODO: replace with something meaningful */
5624 }
8d9c5f34 5625 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5626 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5627
5628 if (sizeof(union cdu_context) != 1024)
5629 /* we currently assume that a context is 1024 bytes */
5630 printk(KERN_ALERT PFX "please adjust the size of"
5631 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5632
5633 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5634 val = (4 << 24) + (0 << 12) + 1024;
5635 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5636 if (CHIP_IS_E1(bp)) {
 5637 /* !!! fix pxp client credit until excel update */
5638 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5639 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5640 }
a2fbb9ea 5641
5642 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5643 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5644 /* enable context validation interrupt from CFC */
5645 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5646
5647 /* set the thresholds to prevent CFC/CDU race */
5648 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5649
5650 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5651 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5652
5653 /* PXPCS COMMON comes here */
5654 /* Reset PCIE errors for debug */
5655 REG_WR(bp, 0x2814, 0xffffffff);
5656 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5657
5658 /* EMAC0 COMMON comes here */
5659 /* EMAC1 COMMON comes here */
5660 /* DBU COMMON comes here */
5661 /* DBG COMMON comes here */
5662
5663 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5664 if (CHIP_IS_E1H(bp)) {
5665 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5666 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5667 }
5668
5669 if (CHIP_REV_IS_SLOW(bp))
5670 msleep(200);
5671
5672 /* finish CFC init */
5673 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5674 if (val != 1) {
5675 BNX2X_ERR("CFC LL_INIT failed\n");
5676 return -EBUSY;
5677 }
5678 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5679 if (val != 1) {
5680 BNX2X_ERR("CFC AC_INIT failed\n");
5681 return -EBUSY;
5682 }
5683 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5684 if (val != 1) {
5685 BNX2X_ERR("CFC CAM_INIT failed\n");
5686 return -EBUSY;
5687 }
5688 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5689
5690 /* read NIG statistic
5691 to see if this is our first up since powerup */
5692 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5693 val = *bnx2x_sp(bp, wb_data[0]);
5694
5695 /* do internal memory self test */
5696 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5697 BNX2X_ERR("internal mem self test failed\n");
5698 return -EBUSY;
5699 }
5700
35b19ba5 5701 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5702 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5703 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5704 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5705 bp->port.need_hw_lock = 1;
5706 break;
5707
35b19ba5 5708 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5709 /* Fan failure is indicated by SPIO 5 */
5710 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5711 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5712
5713 /* set to active low mode */
5714 val = REG_RD(bp, MISC_REG_SPIO_INT);
5715 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5716 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5717 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5718
5719 /* enable interrupt to signal the IGU */
5720 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5721 val |= (1 << MISC_REGISTERS_SPIO_5);
5722 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5723 break;
f1410647 5724
5725 default:
5726 break;
5727 }
f1410647 5728
5729 /* clear PXP2 attentions */
5730 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5731
34f80b04 5732 enable_blocks_attention(bp);
a2fbb9ea 5733
5734 if (!BP_NOMCP(bp)) {
5735 bnx2x_acquire_phy_lock(bp);
5736 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5737 bnx2x_release_phy_lock(bp);
5738 } else
5739 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5740
5741 return 0;
5742}
a2fbb9ea 5743
5744static int bnx2x_init_port(struct bnx2x *bp)
5745{
5746 int port = BP_PORT(bp);
1c06328c 5747 u32 low, high;
34f80b04 5748 u32 val;
a2fbb9ea 5749
5750 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5751
5752 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5753
5754 /* Port PXP comes here */
5755 /* Port PXP2 comes here */
5756#ifdef BCM_ISCSI
5757 /* Port0 1
5758 * Port1 385 */
5759 i++;
5760 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5761 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5762 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5763 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5764
5765 /* Port0 2
5766 * Port1 386 */
5767 i++;
5768 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5769 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5770 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5771 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5772
5773 /* Port0 3
5774 * Port1 387 */
5775 i++;
5776 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5777 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5778 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5779 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5780#endif
34f80b04 5781 /* Port CMs come here */
5782 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5783 (port ? XCM_PORT1_END : XCM_PORT0_END));
5784
5785 /* Port QM comes here */
5786#ifdef BCM_ISCSI
5787 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5788 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5789
5790 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5791 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5792#endif
5793 /* Port DQ comes here */
5794
5795 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5796 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5797 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5798 /* no pause for emulation and FPGA */
5799 low = 0;
5800 high = 513;
5801 } else {
5802 if (IS_E1HMF(bp))
5803 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5804 else if (bp->dev->mtu > 4096) {
5805 if (bp->flags & ONE_PORT_FLAG)
5806 low = 160;
5807 else {
5808 val = bp->dev->mtu;
5809 /* (24*1024 + val*4)/256 */
5810 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5811 }
5812 } else
5813 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5814 high = low + 56; /* 14*1024/256 */
5815 }
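/* worked example (hypothetical SF-mode two-port board, mtu 9000):
 * low = 96 + 9000/64 + 1 = 237 and high = 237 + 56 = 293, both in
 * the BRB's 256-byte threshold units */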
5816 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5817 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5818
5819
ad8d3948 5820 /* Port PRS comes here */
5821 /* Port TSDM comes here */
5822 /* Port CSDM comes here */
5823 /* Port USDM comes here */
5824 /* Port XSDM comes here */
5825 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5826 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5827 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5828 port ? USEM_PORT1_END : USEM_PORT0_END);
5829 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5830 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5831 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5832 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5833 /* Port UPB comes here */
5834 /* Port XPB comes here */
5835
5836 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5837 port ? PBF_PORT1_END : PBF_PORT0_END);
5838
 5839 /* configure PBF to work without PAUSE for mtu 9000 */
34f80b04 5840 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5841
5842 /* update threshold */
34f80b04 5843 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5844 /* update init credit */
34f80b04 5845 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5846
5847 /* probe changes */
34f80b04 5848 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5849 msleep(5);
34f80b04 5850 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5851
5852#ifdef BCM_ISCSI
5853 /* tell the searcher where the T2 table is */
5854 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5855
5856 wb_write[0] = U64_LO(bp->t2_mapping);
5857 wb_write[1] = U64_HI(bp->t2_mapping);
5858 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5859 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5860 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5861 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5862
5863 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5864 /* Port SRCH comes here */
5865#endif
5866 /* Port CDU comes here */
5867 /* Port CFC comes here */
5868
5869 if (CHIP_IS_E1(bp)) {
5870 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5871 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5872 }
5873 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5874 port ? HC_PORT1_END : HC_PORT0_END);
5875
5876 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5877 MISC_AEU_PORT0_START,
5878 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5879 /* init aeu_mask_attn_func_0/1:
5880 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5881 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5882 * bits 4-7 are used for "per vn group attention" */
5883 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5884 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5885
5886 /* Port PXPCS comes here */
5887 /* Port EMAC0 comes here */
5888 /* Port EMAC1 comes here */
5889 /* Port DBU comes here */
5890 /* Port DBG comes here */
5891 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5892 port ? NIG_PORT1_END : NIG_PORT0_END);
5893
5894 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5895
5896 if (CHIP_IS_E1H(bp)) {
5897 /* 0x2 disable e1hov, 0x1 enable */
5898 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5899 (IS_E1HMF(bp) ? 0x1 : 0x2));
5900
5901 /* support pause requests from USDM, TSDM and BRB */
5902 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5903
5904 {
5905 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5906 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5907 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5908 }
5909 }
5910
5911 /* Port MCP comes here */
5912 /* Port DMAE comes here */
5913
35b19ba5 5914 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5915 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5916 {
5917 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5918
5919 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5920 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5921
5922 /* The GPIO should be swapped if the swap register is
5923 set and active */
5924 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5925 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5926
5927 /* Select function upon port-swap configuration */
5928 if (port == 0) {
5929 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5930 aeu_gpio_mask = (swap_val && swap_override) ?
5931 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5932 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5933 } else {
5934 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5935 aeu_gpio_mask = (swap_val && swap_override) ?
5936 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5937 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5938 }
5939 val = REG_RD(bp, offset);
5940 /* add GPIO3 to group */
5941 val |= aeu_gpio_mask;
5942 REG_WR(bp, offset, val);
5943 }
5944 break;
5945
35b19ba5 5946 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5947 /* add SPIO 5 to group 0 */
5948 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5949 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5950 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5951 break;
5952
5953 default:
5954 break;
5955 }
5956
c18487ee 5957 bnx2x__link_reset(bp);
a2fbb9ea 5958
5959 return 0;
5960}
5961
5962#define ILT_PER_FUNC (768/2)
5963#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5964/* the phys address is shifted right 12 bits and a
5965 1=valid bit is added to the 53rd bit;
5966 then, since this is a wide register(TM),
5967 we split it into two 32 bit writes
5968 */
5969#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5970#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5971#define PXP_ONE_ILT(x) (((x) << 10) | x)
5972#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
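/* worked example with a hypothetical DMA address 0x0012_3456_7000:
 * ONCHIP_ADDR1() = 0x01234567 (address bits 43..12) and
 * ONCHIP_ADDR2() = 0x00100000 (the valid bit at position 20; address
 * bits 63..44 are zero for this address) */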
5973
5974#define CNIC_ILT_LINES 0
5975
5976static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5977{
5978 int reg;
5979
5980 if (CHIP_IS_E1H(bp))
5981 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5982 else /* E1 */
5983 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5984
5985 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5986}
5987
5988static int bnx2x_init_func(struct bnx2x *bp)
5989{
5990 int port = BP_PORT(bp);
5991 int func = BP_FUNC(bp);
8badd27a 5992 u32 addr, val;
5993 int i;
5994
5995 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5996
5997 /* set MSI reconfigure capability */
5998 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5999 val = REG_RD(bp, addr);
6000 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6001 REG_WR(bp, addr, val);
6002
6003 i = FUNC_ILT_BASE(func);
6004
6005 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6006 if (CHIP_IS_E1H(bp)) {
6007 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6008 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6009 } else /* E1 */
6010 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6011 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6012
6013
6014 if (CHIP_IS_E1H(bp)) {
6015 for (i = 0; i < 9; i++)
6016 bnx2x_init_block(bp,
6017 cm_start[func][i], cm_end[func][i]);
6018
6019 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6020 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6021 }
6022
6023 /* HC init per function */
6024 if (CHIP_IS_E1H(bp)) {
6025 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6026
6027 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6028 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6029 }
6030 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6031
c14423fe 6032 /* Reset PCIE errors for debug */
6033 REG_WR(bp, 0x2114, 0xffffffff);
6034 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6035
34f80b04
EG
6036 return 0;
6037}
6038
6039static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6040{
6041 int i, rc = 0;
a2fbb9ea 6042
6043 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6044 BP_FUNC(bp), load_code);
a2fbb9ea 6045
6046 bp->dmae_ready = 0;
6047 mutex_init(&bp->dmae_mutex);
6048 bnx2x_gunzip_init(bp);
a2fbb9ea 6049
6050 switch (load_code) {
6051 case FW_MSG_CODE_DRV_LOAD_COMMON:
6052 rc = bnx2x_init_common(bp);
6053 if (rc)
6054 goto init_hw_err;
6055 /* no break */
6056
6057 case FW_MSG_CODE_DRV_LOAD_PORT:
6058 bp->dmae_ready = 1;
6059 rc = bnx2x_init_port(bp);
6060 if (rc)
6061 goto init_hw_err;
6062 /* no break */
6063
6064 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6065 bp->dmae_ready = 1;
6066 rc = bnx2x_init_func(bp);
6067 if (rc)
6068 goto init_hw_err;
6069 break;
6070
6071 default:
6072 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6073 break;
6074 }
6075
6076 if (!BP_NOMCP(bp)) {
6077 int func = BP_FUNC(bp);
6078
6079 bp->fw_drv_pulse_wr_seq =
34f80b04 6080 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6081 DRV_PULSE_SEQ_MASK);
6082 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6083 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6084 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6085 } else
6086 bp->func_stx = 0;
a2fbb9ea 6087
6088 /* this needs to be done before gunzip end */
6089 bnx2x_zero_def_sb(bp);
6090 for_each_queue(bp, i)
6091 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6092
6093init_hw_err:
6094 bnx2x_gunzip_end(bp);
6095
6096 return rc;
6097}
6098
c14423fe 6099/* send the MCP a request, block until there is a reply */
6100static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6101{
34f80b04 6102 int func = BP_FUNC(bp);
6103 u32 seq = ++bp->fw_seq;
6104 u32 rc = 0;
6105 u32 cnt = 1;
6106 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6107
34f80b04 6108 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6109 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6110
6111 do {
 6112 /* let the FW do its magic ... */
6113 msleep(delay);
a2fbb9ea 6114
19680c48 6115 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6116
 6117 /* Give the FW up to 2 seconds (200*10ms) */
6118 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6119
6120 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6121 cnt*delay, rc, seq);
6122
6123 /* is this a reply to our command? */
6124 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6125 rc &= FW_MSG_CODE_MASK;
f1410647 6126
6127 } else {
6128 /* FW BUG! */
6129 BNX2X_ERR("FW failed to respond!\n");
6130 bnx2x_fw_dump(bp);
6131 rc = 0;
6132 }
f1410647 6133
6134 return rc;
6135}
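/* mailbox handshake summary: the driver writes (command | seq) to
 * drv_mb_header and polls fw_mb_header until the FW echoes the same
 * sequence number; the bits under FW_MSG_CODE_MASK are the reply */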
6136
6137static void bnx2x_free_mem(struct bnx2x *bp)
6138{
6139
6140#define BNX2X_PCI_FREE(x, y, size) \
6141 do { \
6142 if (x) { \
6143 pci_free_consistent(bp->pdev, size, x, y); \
6144 x = NULL; \
6145 y = 0; \
6146 } \
6147 } while (0)
6148
6149#define BNX2X_FREE(x) \
6150 do { \
6151 if (x) { \
6152 vfree(x); \
6153 x = NULL; \
6154 } \
6155 } while (0)
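/* both free helpers use the do { } while (0) wrapper so that each
 * macro expands to a single statement, e.g. safely under an unbraced
 * if/else */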
6156
6157 int i;
6158
6159 /* fastpath */
555f6c78 6160 /* Common */
6161 for_each_queue(bp, i) {
6162
555f6c78 6163 /* status blocks */
6164 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6165 bnx2x_fp(bp, i, status_blk_mapping),
6166 sizeof(struct host_status_block) +
6167 sizeof(struct eth_tx_db_data));
6168 }
6169 /* Rx */
6170 for_each_rx_queue(bp, i) {
a2fbb9ea 6171
555f6c78 6172 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6173 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6174 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6175 bnx2x_fp(bp, i, rx_desc_mapping),
6176 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6177
6178 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6179 bnx2x_fp(bp, i, rx_comp_mapping),
6180 sizeof(struct eth_fast_path_rx_cqe) *
6181 NUM_RCQ_BD);
a2fbb9ea 6182
7a9b2557 6183 /* SGE ring */
32626230 6184 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6185 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6186 bnx2x_fp(bp, i, rx_sge_mapping),
6187 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6188 }
6189 /* Tx */
6190 for_each_tx_queue(bp, i) {
6191
6192 /* fastpath tx rings: tx_buf tx_desc */
6193 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6194 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6195 bnx2x_fp(bp, i, tx_desc_mapping),
6196 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6197 }
6198 /* end of fastpath */
6199
6200 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6201 sizeof(struct host_def_status_block));
6202
6203 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6204 sizeof(struct bnx2x_slowpath));
6205
6206#ifdef BCM_ISCSI
6207 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6208 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6209 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6210 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6211#endif
7a9b2557 6212 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6213
6214#undef BNX2X_PCI_FREE
 6215#undef BNX2X_FREE
6216}
6217
6218static int bnx2x_alloc_mem(struct bnx2x *bp)
6219{
6220
6221#define BNX2X_PCI_ALLOC(x, y, size) \
6222 do { \
6223 x = pci_alloc_consistent(bp->pdev, size, y); \
6224 if (x == NULL) \
6225 goto alloc_mem_err; \
6226 memset(x, 0, size); \
6227 } while (0)
6228
6229#define BNX2X_ALLOC(x, size) \
6230 do { \
6231 x = vmalloc(size); \
6232 if (x == NULL) \
6233 goto alloc_mem_err; \
6234 memset(x, 0, size); \
6235 } while (0)
6236
6237 int i;
6238
6239 /* fastpath */
555f6c78 6240 /* Common */
6241 for_each_queue(bp, i) {
6242 bnx2x_fp(bp, i, bp) = bp;
6243
555f6c78 6244 /* status blocks */
6245 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6246 &bnx2x_fp(bp, i, status_blk_mapping),
6247 sizeof(struct host_status_block) +
6248 sizeof(struct eth_tx_db_data));
6249 }
6250 /* Rx */
6251 for_each_rx_queue(bp, i) {
a2fbb9ea 6252
555f6c78 6253 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6254 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6255 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6256 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6257 &bnx2x_fp(bp, i, rx_desc_mapping),
6258 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6259
6260 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6261 &bnx2x_fp(bp, i, rx_comp_mapping),
6262 sizeof(struct eth_fast_path_rx_cqe) *
6263 NUM_RCQ_BD);
6264
6265 /* SGE ring */
6266 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6267 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6268 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6269 &bnx2x_fp(bp, i, rx_sge_mapping),
6270 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6271 }
6272 /* Tx */
6273 for_each_tx_queue(bp, i) {
6274
6275 bnx2x_fp(bp, i, hw_tx_prods) =
6276 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6277
6278 bnx2x_fp(bp, i, tx_prods_mapping) =
6279 bnx2x_fp(bp, i, status_blk_mapping) +
6280 sizeof(struct host_status_block);
6281
6282 /* fastpath tx rings: tx_buf tx_desc */
6283 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6284 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6285 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6286 &bnx2x_fp(bp, i, tx_desc_mapping),
6287 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6288 }
6289 /* end of fastpath */
6290
6291 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6292 sizeof(struct host_def_status_block));
6293
6294 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6295 sizeof(struct bnx2x_slowpath));
6296
6297#ifdef BCM_ISCSI
6298 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6299
6300 /* Initialize T1 */
6301 for (i = 0; i < 64*1024; i += 64) {
6302 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6303 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6304 }
6305
6306 /* allocate searcher T2 table
6307 we allocate 1/4 of alloc num for T2
6308 (which is not entered into the ILT) */
6309 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6310
6311 /* Initialize T2 */
6312 for (i = 0; i < 16*1024; i += 64)
6313 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6314
c14423fe 6315 /* now fixup the last line in the block to point to the next block */
6316 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6317
6318 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6319 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6320
6321 /* QM queues (128*MAX_CONN) */
6322 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6323#endif
6324
6325 /* Slow path ring */
6326 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6327
6328 return 0;
6329
6330alloc_mem_err:
6331 bnx2x_free_mem(bp);
6332 return -ENOMEM;
6333
6334#undef BNX2X_PCI_ALLOC
6335#undef BNX2X_ALLOC
6336}
6337
6338static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6339{
6340 int i;
6341
555f6c78 6342 for_each_tx_queue(bp, i) {
6343 struct bnx2x_fastpath *fp = &bp->fp[i];
6344
6345 u16 bd_cons = fp->tx_bd_cons;
6346 u16 sw_prod = fp->tx_pkt_prod;
6347 u16 sw_cons = fp->tx_pkt_cons;
6348
6349 while (sw_cons != sw_prod) {
6350 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6351 sw_cons++;
6352 }
6353 }
6354}
6355
6356static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6357{
6358 int i, j;
6359
555f6c78 6360 for_each_rx_queue(bp, j) {
6361 struct bnx2x_fastpath *fp = &bp->fp[j];
6362
6363 for (i = 0; i < NUM_RX_BD; i++) {
6364 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6365 struct sk_buff *skb = rx_buf->skb;
6366
6367 if (skb == NULL)
6368 continue;
6369
6370 pci_unmap_single(bp->pdev,
6371 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6372 bp->rx_buf_size,
6373 PCI_DMA_FROMDEVICE);
6374
6375 rx_buf->skb = NULL;
6376 dev_kfree_skb(skb);
6377 }
7a9b2557 6378 if (!fp->disable_tpa)
6379 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6380 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6381 ETH_MAX_AGGREGATION_QUEUES_E1H);
6382 }
6383}
6384
6385static void bnx2x_free_skbs(struct bnx2x *bp)
6386{
6387 bnx2x_free_tx_skbs(bp);
6388 bnx2x_free_rx_skbs(bp);
6389}
6390
6391static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6392{
34f80b04 6393 int i, offset = 1;
6394
6395 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6396 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6397 bp->msix_table[0].vector);
6398
6399 for_each_queue(bp, i) {
c14423fe 6400 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6401 "state %x\n", i, bp->msix_table[i + offset].vector,
6402 bnx2x_fp(bp, i, state));
6403
34f80b04 6404 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6405 }
6406}
6407
6408static void bnx2x_free_irq(struct bnx2x *bp)
6409{
a2fbb9ea 6410 if (bp->flags & USING_MSIX_FLAG) {
6411 bnx2x_free_msix_irqs(bp);
6412 pci_disable_msix(bp->pdev);
6413 bp->flags &= ~USING_MSIX_FLAG;
6414
6415 } else if (bp->flags & USING_MSI_FLAG) {
6416 free_irq(bp->pdev->irq, bp->dev);
6417 pci_disable_msi(bp->pdev);
6418 bp->flags &= ~USING_MSI_FLAG;
6419
6420 } else
6421 free_irq(bp->pdev->irq, bp->dev);
6422}
6423
6424static int bnx2x_enable_msix(struct bnx2x *bp)
6425{
6426 int i, rc, offset = 1;
6427 int igu_vec = 0;
a2fbb9ea 6428
6429 bp->msix_table[0].entry = igu_vec;
6430 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6431
34f80b04 6432 for_each_queue(bp, i) {
8badd27a 6433 igu_vec = BP_L_ID(bp) + offset + i;
6434 bp->msix_table[i + offset].entry = igu_vec;
6435 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6436 "(fastpath #%u)\n", i + offset, igu_vec, i);
6437 }
6438
34f80b04 6439 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6440 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6441 if (rc) {
6442 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6443 return rc;
34f80b04 6444 }
8badd27a 6445
6446 bp->flags |= USING_MSIX_FLAG;
6447
6448 return 0;
6449}
6450
6451static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6452{
34f80b04 6453 int i, rc, offset = 1;
a2fbb9ea 6454
6455 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6456 bp->dev->name, bp->dev);
6457 if (rc) {
6458 BNX2X_ERR("request sp irq failed\n");
6459 return -EBUSY;
6460 }
6461
6462 for_each_queue(bp, i) {
6463 struct bnx2x_fastpath *fp = &bp->fp[i];
6464
6465 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6466 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6467 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6468 if (rc) {
555f6c78 6469 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6470 bnx2x_free_msix_irqs(bp);
6471 return -EBUSY;
6472 }
6473
555f6c78 6474 fp->state = BNX2X_FP_STATE_IRQ;
6475 }
6476
6477 i = BNX2X_NUM_QUEUES(bp);
6478 if (is_multi(bp))
6479 printk(KERN_INFO PFX
6480 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6481 bp->dev->name, bp->msix_table[0].vector,
6482 bp->msix_table[offset].vector,
6483 bp->msix_table[offset + i - 1].vector);
6484 else
6485 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6486 bp->dev->name, bp->msix_table[0].vector,
6487 bp->msix_table[offset + i - 1].vector);
6488
a2fbb9ea 6489 return 0;
6490}
6491
6492static int bnx2x_enable_msi(struct bnx2x *bp)
6493{
6494 int rc;
6495
6496 rc = pci_enable_msi(bp->pdev);
6497 if (rc) {
6498 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6499 return -1;
6500 }
6501 bp->flags |= USING_MSI_FLAG;
6502
6503 return 0;
6504}
6505
6506static int bnx2x_req_irq(struct bnx2x *bp)
6507{
8badd27a 6508 unsigned long flags;
34f80b04 6509 int rc;
a2fbb9ea 6510
6511 if (bp->flags & USING_MSI_FLAG)
6512 flags = 0;
6513 else
6514 flags = IRQF_SHARED;
6515
6516 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6517 bp->dev->name, bp->dev);
6518 if (!rc)
6519 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6520
6521 return rc;
6522}
6523
6524static void bnx2x_napi_enable(struct bnx2x *bp)
6525{
6526 int i;
6527
555f6c78 6528 for_each_rx_queue(bp, i)
6529 napi_enable(&bnx2x_fp(bp, i, napi));
6530}
6531
6532static void bnx2x_napi_disable(struct bnx2x *bp)
6533{
6534 int i;
6535
555f6c78 6536 for_each_rx_queue(bp, i)
6537 napi_disable(&bnx2x_fp(bp, i, napi));
6538}
6539
6540static void bnx2x_netif_start(struct bnx2x *bp)
6541{
6542 if (atomic_dec_and_test(&bp->intr_sem)) {
6543 if (netif_running(bp->dev)) {
6544 bnx2x_napi_enable(bp);
6545 bnx2x_int_enable(bp);
6546 if (bp->state == BNX2X_STATE_OPEN)
6547 netif_tx_wake_all_queues(bp->dev);
6548 }
6549 }
6550}
6551
f8ef6e44 6552static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6553{
f8ef6e44 6554 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6555 bnx2x_napi_disable(bp);
65abd74d 6556 if (netif_running(bp->dev)) {
6557 netif_tx_disable(bp->dev);
6558 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6559 }
6560}
6561
6562/*
6563 * Init service functions
6564 */
6565
3101c2bc 6566static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6567{
6568 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6569 int port = BP_PORT(bp);
6570
6571 /* CAM allocation
6572 * unicasts 0-31:port0 32-63:port1
6573 * multicast 64-127:port0 128-191:port1
6574 */
8d9c5f34 6575 config->hdr.length = 2;
af246401 6576 config->hdr.offset = port ? 32 : 0;
34f80b04 6577 config->hdr.client_id = BP_CL_ID(bp);
6578 config->hdr.reserved1 = 0;
6579
6580 /* primary MAC */
6581 config->config_table[0].cam_entry.msb_mac_addr =
6582 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6583 config->config_table[0].cam_entry.middle_mac_addr =
6584 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6585 config->config_table[0].cam_entry.lsb_mac_addr =
6586 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6587 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6588 if (set)
6589 config->config_table[0].target_table_entry.flags = 0;
6590 else
6591 CAM_INVALIDATE(config->config_table[0]);
6592 config->config_table[0].target_table_entry.client_id = 0;
6593 config->config_table[0].target_table_entry.vlan_id = 0;
6594
6595 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6596 (set ? "setting" : "clearing"),
6597 config->config_table[0].cam_entry.msb_mac_addr,
6598 config->config_table[0].cam_entry.middle_mac_addr,
6599 config->config_table[0].cam_entry.lsb_mac_addr);
6600
6601 /* broadcast */
6602 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6603 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6604 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6605 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6606 if (set)
6607 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6608 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6609 else
6610 CAM_INVALIDATE(config->config_table[1]);
6611 config->config_table[1].target_table_entry.client_id = 0;
6612 config->config_table[1].target_table_entry.vlan_id = 0;
6613
6614 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6615 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6616 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6617}
6618
3101c2bc 6619static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6620{
6621 struct mac_configuration_cmd_e1h *config =
6622 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6623
3101c2bc 6624 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6625 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6626 return;
6627 }
6628
6629 /* CAM allocation for E1H
6630 * unicasts: by func number
6631 * multicast: 20+FUNC*20, 20 each
6632 */
8d9c5f34 6633 config->hdr.length = 1;
6634 config->hdr.offset = BP_FUNC(bp);
6635 config->hdr.client_id = BP_CL_ID(bp);
6636 config->hdr.reserved1 = 0;
6637
6638 /* primary MAC */
6639 config->config_table[0].msb_mac_addr =
6640 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6641 config->config_table[0].middle_mac_addr =
6642 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6643 config->config_table[0].lsb_mac_addr =
6644 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6645 config->config_table[0].client_id = BP_L_ID(bp);
6646 config->config_table[0].vlan_id = 0;
6647 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6648 if (set)
6649 config->config_table[0].flags = BP_PORT(bp);
6650 else
6651 config->config_table[0].flags =
6652 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6653
6654 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6655 (set ? "setting" : "clearing"),
6656 config->config_table[0].msb_mac_addr,
6657 config->config_table[0].middle_mac_addr,
6658 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6659
6660 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6661 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6662 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6663}
6664
6665static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6666 int *state_p, int poll)
6667{
6668 /* can take a while if any port is running */
34f80b04 6669 int cnt = 500;
a2fbb9ea 6670
6671 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6672 poll ? "polling" : "waiting", state, idx);
6673
6674 might_sleep();
34f80b04 6675 while (cnt--) {
6676 if (poll) {
6677 bnx2x_rx_int(bp->fp, 10);
6678 /* if index is different from 0
6679 * the reply for some commands will
3101c2bc 6680 * be on the non-default queue
6681 */
6682 if (idx)
6683 bnx2x_rx_int(&bp->fp[idx], 10);
6684 }
a2fbb9ea 6685
3101c2bc 6686 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6687 if (*state_p == state)
6688 return 0;
6689
a2fbb9ea 6690 msleep(1);
6691 }
6692
a2fbb9ea 6693 /* timeout! */
6694 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6695 poll ? "polling" : "waiting", state, idx);
6696#ifdef BNX2X_STOP_ON_ERROR
6697 bnx2x_panic();
6698#endif
a2fbb9ea 6699
49d66772 6700 return -EBUSY;
6701}
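/*
 * Distilled sketch of the wait pattern implemented above (the helper
 * name is illustrative, not a driver symbol): poll a state word that
 * another context updates, sleep 1 ms per try, and give up with
 * -EBUSY after a bounded number of tries.
 */
#include <linux/delay.h>
#include <linux/errno.h>

static int wait_for_state_sketch(int *state_p, int wanted, int tries)
{
        while (tries--) {
                smp_rmb();              /* state written by another context */
                if (*state_p == wanted)
                        return 0;
                msleep(1);
        }
        return -EBUSY;                  /* timeout - caller decides recovery */
}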
6702
6703static int bnx2x_setup_leading(struct bnx2x *bp)
6704{
34f80b04 6705 int rc;
a2fbb9ea 6706
c14423fe 6707 /* reset IGU state */
34f80b04 6708 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6709
6710 /* SETUP ramrod */
6711 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6712
6713 /* Wait for completion */
6714 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6715
34f80b04 6716 return rc;
6717}
6718
6719static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6720{
6721 struct bnx2x_fastpath *fp = &bp->fp[index];
6722
a2fbb9ea 6723 /* reset IGU state */
555f6c78 6724 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6725
228241eb 6726 /* SETUP ramrod */
6727 fp->state = BNX2X_FP_STATE_OPENING;
6728 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6729 fp->cl_id, 0);
6730
6731 /* Wait for completion */
6732 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6733 &(fp->state), 0);
6734}
6735
a2fbb9ea 6736static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6737
8badd27a 6738static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6739{
555f6c78 6740 int num_queues;
a2fbb9ea 6741
6742 switch (int_mode) {
6743 case INT_MODE_INTx:
6744 case INT_MODE_MSI:
6745 num_queues = 1;
6746 bp->num_rx_queues = num_queues;
6747 bp->num_tx_queues = num_queues;
6748 DP(NETIF_MSG_IFUP,
6749 "set number of queues to %d\n", num_queues);
6750 break;
6751
6752 case INT_MODE_MSIX:
6753 default:
6754 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6755 num_queues = min_t(u32, num_online_cpus(),
6756 BNX2X_MAX_QUEUES(bp));
34f80b04 6757 else
6758 num_queues = 1;
6759 bp->num_rx_queues = num_queues;
6760 bp->num_tx_queues = num_queues;
6761 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6762 " number of tx queues to %d\n",
6763 bp->num_rx_queues, bp->num_tx_queues);
6764 /* if we can't use MSI-X we only need one fp,
6765 * so try to enable MSI-X with the requested number of fp's
6766 * and fallback to MSI or legacy INTx with one fp
6767 */
8badd27a 6768 if (bnx2x_enable_msix(bp)) {
34f80b04 6769 /* failed to enable MSI-X */
6770 num_queues = 1;
6771 bp->num_rx_queues = num_queues;
6772 bp->num_tx_queues = num_queues;
6773 if (bp->multi_mode)
6774 BNX2X_ERR("Multi requested but failed to "
6775 "enable MSI-X set number of "
6776 "queues to %d\n", num_queues);
a2fbb9ea 6777 }
8badd27a 6778 break;
a2fbb9ea 6779 }
555f6c78 6780 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6781}
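/*
 * Sketch of the queue-count policy above, pulled out as a pure
 * function for clarity (the helper name is illustrative): one queue
 * for INT#x or MSI, and for MSI-X as many queues as online CPUs,
 * capped by what the device supports, when RSS is in regular mode.
 */
static int queues_for_int_mode(int mode, int rss_regular,
                               int online_cpus, int hw_max)
{
        if ((mode == INT_MODE_INTx) || (mode == INT_MODE_MSI))
                return 1;
        if (rss_regular)
                return (online_cpus < hw_max) ? online_cpus : hw_max;
        return 1;
}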
6782
6783static void bnx2x_set_rx_mode(struct net_device *dev);
6784
6785/* must be called with rtnl_lock */
6786static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6787{
6788 u32 load_code;
6789 int i, rc = 0;
6790#ifdef BNX2X_STOP_ON_ERROR
6791 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6792 if (unlikely(bp->panic))
6793 return -EPERM;
6794#endif
6795
6796 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6797
6798 bnx2x_set_int_mode(bp);
c14423fe 6799
6800 if (bnx2x_alloc_mem(bp))
6801 return -ENOMEM;
6802
555f6c78 6803 for_each_rx_queue(bp, i)
6804 bnx2x_fp(bp, i, disable_tpa) =
6805 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6806
555f6c78 6807 for_each_rx_queue(bp, i)
6808 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6809 bnx2x_poll, 128);
6810
6811#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6812 for_each_rx_queue(bp, i) {
6813 struct bnx2x_fastpath *fp = &bp->fp[i];
6814
6815 fp->poll_no_work = 0;
6816 fp->poll_calls = 0;
6817 fp->poll_max_calls = 0;
6818 fp->poll_complete = 0;
6819 fp->poll_exit = 0;
6820 }
6821#endif
6822 bnx2x_napi_enable(bp);
6823
6824 if (bp->flags & USING_MSIX_FLAG) {
6825 rc = bnx2x_req_msix_irqs(bp);
6826 if (rc) {
6827 pci_disable_msix(bp->pdev);
2dfe0e1f 6828 goto load_error1;
6829 }
6830 } else {
6831 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6832 bnx2x_enable_msi(bp);
6833 bnx2x_ack_int(bp);
6834 rc = bnx2x_req_irq(bp);
6835 if (rc) {
2dfe0e1f 6836 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6837 if (bp->flags & USING_MSI_FLAG)
6838 pci_disable_msi(bp->pdev);
2dfe0e1f 6839 goto load_error1;
a2fbb9ea 6840 }
6841 if (bp->flags & USING_MSI_FLAG) {
6842 bp->dev->irq = bp->pdev->irq;
6843 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6844 bp->dev->name, bp->pdev->irq);
6845 }
6846 }
6847
6848 /* Send LOAD_REQUEST command to the MCP.
6849 The MCP response gives the type of LOAD command:
6850 if this is the first port to be initialized,
6851 the common blocks should be initialized as well; otherwise not.
6852 */
6853 if (!BP_NOMCP(bp)) {
6854 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6855 if (!load_code) {
6856 BNX2X_ERR("MCP response failure, aborting\n");
6857 rc = -EBUSY;
6858 goto load_error2;
6859 }
6860 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6861 rc = -EBUSY; /* other port in diagnostic mode */
6862 goto load_error2;
6863 }
6864
6865 } else {
6866 int port = BP_PORT(bp);
6867
6868 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6869 load_count[0], load_count[1], load_count[2]);
6870 load_count[0]++;
6871 load_count[1 + port]++;
6872 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6873 load_count[0], load_count[1], load_count[2]);
6874 if (load_count[0] == 1)
6875 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6876 else if (load_count[1 + port] == 1)
6877 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6878 else
6879 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6880 }
6881
6882 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6883 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6884 bp->port.pmf = 1;
6885 else
6886 bp->port.pmf = 0;
6887 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6888
a2fbb9ea 6889 /* Initialize HW */
6890 rc = bnx2x_init_hw(bp, load_code);
6891 if (rc) {
a2fbb9ea 6892 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6893 goto load_error2;
6894 }
6895
a2fbb9ea 6896 /* Setup NIC internals and enable interrupts */
471de716 6897 bnx2x_nic_init(bp, load_code);
6898
6899 /* Send LOAD_DONE command to MCP */
34f80b04 6900 if (!BP_NOMCP(bp)) {
6901 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6902 if (!load_code) {
da5a662a 6903 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6904 rc = -EBUSY;
2dfe0e1f 6905 goto load_error3;
6906 }
6907 }
6908
6909 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6910
6911 rc = bnx2x_setup_leading(bp);
6912 if (rc) {
da5a662a 6913 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6914 goto load_error3;
34f80b04 6915 }
a2fbb9ea 6916
6917 if (CHIP_IS_E1H(bp))
6918 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6919 BNX2X_ERR("!!! mf_cfg function disabled\n");
6920 bp->state = BNX2X_STATE_DISABLED;
6921 }
a2fbb9ea 6922
6923 if (bp->state == BNX2X_STATE_OPEN)
6924 for_each_nondefault_queue(bp, i) {
6925 rc = bnx2x_setup_multi(bp, i);
6926 if (rc)
2dfe0e1f 6927 goto load_error3;
34f80b04 6928 }
a2fbb9ea 6929
34f80b04 6930 if (CHIP_IS_E1(bp))
3101c2bc 6931 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6932 else
3101c2bc 6933 bnx2x_set_mac_addr_e1h(bp, 1);
6934
6935 if (bp->port.pmf)
6936 bnx2x_initial_phy_init(bp);
6937
6938 /* Start fast path */
6939 switch (load_mode) {
6940 case LOAD_NORMAL:
6941 /* Tx queues should only be re-enabled */
555f6c78 6942 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6943 /* Initialize the receive filter. */
6944 bnx2x_set_rx_mode(bp->dev);
6945 break;
6946
6947 case LOAD_OPEN:
555f6c78 6948 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6949 /* Initialize the receive filter. */
34f80b04 6950 bnx2x_set_rx_mode(bp->dev);
34f80b04 6951 break;
a2fbb9ea 6952
34f80b04 6953 case LOAD_DIAG:
2dfe0e1f 6954 /* Initialize the receive filter. */
a2fbb9ea 6955 bnx2x_set_rx_mode(bp->dev);
6956 bp->state = BNX2X_STATE_DIAG;
6957 break;
6958
6959 default:
6960 break;
6961 }
6962
6963 if (!bp->port.pmf)
6964 bnx2x__link_status_update(bp);
6965
6966 /* start the timer */
6967 mod_timer(&bp->timer, jiffies + bp->current_interval);
6968
34f80b04 6969
6970 return 0;
6971
6972load_error3:
6973 bnx2x_int_disable_sync(bp, 1);
6974 if (!BP_NOMCP(bp)) {
6975 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6976 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6977 }
6978 bp->port.pmf = 0;
6979 /* Free SKBs, SGEs, TPA pool and driver internals */
6980 bnx2x_free_skbs(bp);
555f6c78 6981 for_each_rx_queue(bp, i)
3196a88a 6982 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6983load_error2:
6984 /* Release IRQs */
6985 bnx2x_free_irq(bp);
6986load_error1:
6987 bnx2x_napi_disable(bp);
555f6c78 6988 for_each_rx_queue(bp, i)
7cde1c8b 6989 netif_napi_del(&bnx2x_fp(bp, i, napi));
6990 bnx2x_free_mem(bp);
6991
6992 /* TBD we really need to reset the chip
6993 if we want to recover from this */
34f80b04 6994 return rc;
6995}
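/*
 * Sketch of the no-MCP load accounting used above (illustrative
 * helper, not a driver symbol): load_count[0] counts all functions on
 * the chip and load_count[1 + port] counts the functions per port, so
 * the first loader initializes the common blocks, the first loader on
 * a port initializes that port, and everyone else only the function.
 */
static u32 nomcp_load_code(int port, int load_count[3])
{
        load_count[0]++;                /* chip-wide count */
        load_count[1 + port]++;         /* per-port count */

        if (load_count[0] == 1)
                return FW_MSG_CODE_DRV_LOAD_COMMON;
        if (load_count[1 + port] == 1)
                return FW_MSG_CODE_DRV_LOAD_PORT;
        return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}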
6996
6997static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6998{
555f6c78 6999 struct bnx2x_fastpath *fp = &bp->fp[index];
7000 int rc;
7001
c14423fe 7002 /* halt the connection */
7003 fp->state = BNX2X_FP_STATE_HALTING;
7004 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7005
34f80b04 7006 /* Wait for completion */
a2fbb9ea 7007 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7008 &(fp->state), 1);
c14423fe 7009 if (rc) /* timeout */
7010 return rc;
7011
7012 /* delete cfc entry */
7013 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7014
7015 /* Wait for completion */
7016 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7017 &(fp->state), 1);
34f80b04 7018 return rc;
7019}
7020
da5a662a 7021static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7022{
49d66772 7023 u16 dsb_sp_prod_idx;
c14423fe 7024 /* if the other port is handling traffic,
a2fbb9ea 7025 this can take a lot of time */
7026 int cnt = 500;
7027 int rc;
7028
7029 might_sleep();
7030
7031 /* Send HALT ramrod */
7032 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 7033 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 7034
7035 /* Wait for completion */
7036 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7037 &(bp->fp[0].state), 1);
7038 if (rc) /* timeout */
da5a662a 7039 return rc;
a2fbb9ea 7040
49d66772 7041 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7042
228241eb 7043 /* Send PORT_DELETE ramrod */
7044 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7045
49d66772 7046 /* Wait for completion to arrive on default status block
7047 we are going to reset the chip anyway,
7048 so there is not much to do if this times out
7049 */
34f80b04 7050 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7051 if (!cnt) {
7052 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7053 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7054 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7055#ifdef BNX2X_STOP_ON_ERROR
7056 bnx2x_panic();
7057#else
7058 rc = -EBUSY;
7059#endif
7060 break;
7061 }
7062 cnt--;
da5a662a 7063 msleep(1);
5650d9d4 7064 rmb(); /* Refresh the dsb_sp_prod */
7065 }
7066 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7067 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7068
7069 return rc;
7070}
7071
7072static void bnx2x_reset_func(struct bnx2x *bp)
7073{
7074 int port = BP_PORT(bp);
7075 int func = BP_FUNC(bp);
7076 int base, i;
7077
7078 /* Configure IGU */
7079 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7080 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7081
7082 /* Clear ILT */
7083 base = FUNC_ILT_BASE(func);
7084 for (i = base; i < base + ILT_PER_FUNC; i++)
7085 bnx2x_ilt_wr(bp, i, 0);
7086}
7087
7088static void bnx2x_reset_port(struct bnx2x *bp)
7089{
7090 int port = BP_PORT(bp);
7091 u32 val;
7092
7093 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7094
7095 /* Do not rcv packets to BRB */
7096 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7097 /* Do not direct rcv packets that are not for MCP to the BRB */
7098 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7099 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7100
7101 /* Configure AEU */
7102 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7103
7104 msleep(100);
7105 /* Check for BRB port occupancy */
7106 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7107 if (val)
7108 DP(NETIF_MSG_IFDOWN,
33471629 7109 "BRB1 is not empty %d blocks are occupied\n", val);
7110
7111 /* TODO: Close Doorbell port? */
7112}
7113
7114static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7115{
7116 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7117 BP_FUNC(bp), reset_code);
7118
7119 switch (reset_code) {
7120 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7121 bnx2x_reset_port(bp);
7122 bnx2x_reset_func(bp);
7123 bnx2x_reset_common(bp);
7124 break;
7125
7126 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7127 bnx2x_reset_port(bp);
7128 bnx2x_reset_func(bp);
7129 break;
7130
7131 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7132 bnx2x_reset_func(bp);
7133 break;
49d66772 7134
7135 default:
7136 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7137 break;
7138 }
7139}
7140
33471629 7141/* must be called with rtnl_lock */
34f80b04 7142static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7143{
da5a662a 7144 int port = BP_PORT(bp);
a2fbb9ea 7145 u32 reset_code = 0;
da5a662a 7146 int i, cnt, rc;
7147
7148 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7149
7150 bp->rx_mode = BNX2X_RX_MODE_NONE;
7151 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7152
f8ef6e44 7153 bnx2x_netif_stop(bp, 1);
e94d8af3 7154
7155 del_timer_sync(&bp->timer);
7156 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7157 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7158 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7159
7160 /* Release IRQs */
7161 bnx2x_free_irq(bp);
7162
7163 /* Wait until tx fastpath tasks complete */
7164 for_each_tx_queue(bp, i) {
7165 struct bnx2x_fastpath *fp = &bp->fp[i];
7166
7167 cnt = 1000;
7168 smp_rmb();
e8b5fc51 7169 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7170
65abd74d 7171 bnx2x_tx_int(fp, 1000);
7172 if (!cnt) {
7173 BNX2X_ERR("timeout waiting for queue[%d]\n",
7174 i);
7175#ifdef BNX2X_STOP_ON_ERROR
7176 bnx2x_panic();
7177 return -EBUSY;
7178#else
7179 break;
7180#endif
7181 }
7182 cnt--;
da5a662a 7183 msleep(1);
7184 smp_rmb();
7185 }
228241eb 7186 }
7187 /* Give HW time to discard old tx messages */
7188 msleep(1);
a2fbb9ea 7189
7190 if (CHIP_IS_E1(bp)) {
7191 struct mac_configuration_cmd *config =
7192 bnx2x_sp(bp, mcast_config);
7193
7194 bnx2x_set_mac_addr_e1(bp, 0);
7195
8d9c5f34 7196 for (i = 0; i < config->hdr.length; i++)
7197 CAM_INVALIDATE(config->config_table[i]);
7198
8d9c5f34 7199 config->hdr.length = i;
7200 if (CHIP_REV_IS_SLOW(bp))
7201 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7202 else
7203 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7204 config->hdr.client_id = BP_CL_ID(bp);
7205 config->hdr.reserved1 = 0;
7206
7207 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7208 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7209 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7210
7211 } else { /* E1H */
7212 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7213
7214 bnx2x_set_mac_addr_e1h(bp, 0);
7215
7216 for (i = 0; i < MC_HASH_SIZE; i++)
7217 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7218 }
7219
7220 if (unload_mode == UNLOAD_NORMAL)
7221 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7222
7223 else if (bp->flags & NO_WOL_FLAG) {
7224 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7225 if (CHIP_IS_E1H(bp))
7226 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7227
7228 } else if (bp->wol) {
7229 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7230 u8 *mac_addr = bp->dev->dev_addr;
7231 u32 val;
7232 /* The mac address is written to entries 1-4 to
7233 preserve entry 0 which is used by the PMF */
7234 u8 entry = (BP_E1HVN(bp) + 1)*8;
7235
7236 val = (mac_addr[0] << 8) | mac_addr[1];
7237 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7238
7239 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7240 (mac_addr[4] << 8) | mac_addr[5];
7241 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7242
7243 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7244
7245 } else
7246 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7247
7248 /* Close multi and leading connections;
7249 Completions for ramrods are collected in a synchronous way */
7250 for_each_nondefault_queue(bp, i)
7251 if (bnx2x_stop_multi(bp, i))
228241eb 7252 goto unload_error;
a2fbb9ea 7253
7254 rc = bnx2x_stop_leading(bp);
7255 if (rc) {
34f80b04 7256 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7257#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7258 return -EBUSY;
7259#else
7260 goto unload_error;
34f80b04 7261#endif
7262 }
7263
7264unload_error:
34f80b04 7265 if (!BP_NOMCP(bp))
228241eb 7266 reset_code = bnx2x_fw_command(bp, reset_code);
7267 else {
7268 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7269 load_count[0], load_count[1], load_count[2]);
7270 load_count[0]--;
da5a662a 7271 load_count[1 + port]--;
7272 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7273 load_count[0], load_count[1], load_count[2]);
7274 if (load_count[0] == 0)
7275 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7276 else if (load_count[1 + port] == 0)
7277 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7278 else
7279 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7280 }
a2fbb9ea 7281
7282 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7283 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7284 bnx2x__link_reset(bp);
7285
7286 /* Reset the chip */
228241eb 7287 bnx2x_reset_chip(bp, reset_code);
7288
7289 /* Report UNLOAD_DONE to MCP */
34f80b04 7290 if (!BP_NOMCP(bp))
a2fbb9ea 7291 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 7292 bp->port.pmf = 0;
a2fbb9ea 7293
7a9b2557 7294 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7295 bnx2x_free_skbs(bp);
555f6c78 7296 for_each_rx_queue(bp, i)
3196a88a 7297 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7298 for_each_rx_queue(bp, i)
7cde1c8b 7299 netif_napi_del(&bnx2x_fp(bp, i, napi));
7300 bnx2x_free_mem(bp);
7301
7302 bp->state = BNX2X_STATE_CLOSED;
228241eb 7303
7304 netif_carrier_off(bp->dev);
7305
7306 return 0;
7307}
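/*
 * Standalone sketch (not driver code) of the EMAC MAC-match packing
 * performed in the WoL branch above: the six address bytes are split
 * into a word holding bytes 0-1 and a word holding bytes 2-5, most
 * significant byte first.
 */
#include <stdint.h>

static void emac_match_words(const uint8_t mac[6],
                             uint32_t *hi, uint32_t *lo)
{
        *hi = ((uint32_t)mac[0] << 8) | mac[1];                 /* bytes 0-1 */
        *lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
              ((uint32_t)mac[4] << 8) | mac[5];                 /* bytes 2-5 */
}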
7308
7309static void bnx2x_reset_task(struct work_struct *work)
7310{
7311 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7312
7313#ifdef BNX2X_STOP_ON_ERROR
7314 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7315 " so reset not done to allow debug dump,\n"
7316 KERN_ERR " you will need to reboot when done\n");
7317 return;
7318#endif
7319
7320 rtnl_lock();
7321
7322 if (!netif_running(bp->dev))
7323 goto reset_task_exit;
7324
7325 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7326 bnx2x_nic_load(bp, LOAD_NORMAL);
7327
7328reset_task_exit:
7329 rtnl_unlock();
7330}
7331
7332/* end of nic load/unload */
7333
7334/* ethtool_ops */
7335
7336/*
7337 * Init service functions
7338 */
7339
7340static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7341{
7342 switch (func) {
7343 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7344 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7345 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7346 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7347 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7348 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7349 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7350 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7351 default:
7352 BNX2X_ERR("Unsupported function index: %d\n", func);
7353 return (u32)(-1);
7354 }
7355}
7356
7357static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7358{
7359 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7360
7361 /* Flush all outstanding writes */
7362 mmiowb();
7363
7364 /* Pretend to be function 0 */
7365 REG_WR(bp, reg, 0);
7366 /* Flush the GRC transaction (in the chip) */
7367 new_val = REG_RD(bp, reg);
7368 if (new_val != 0) {
7369 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7370 new_val);
7371 BUG();
7372 }
7373
7374 /* From now we are in the "like-E1" mode */
7375 bnx2x_int_disable(bp);
7376
7377 /* Flush all outstanding writes */
7378 mmiowb();
7379
7380 /* Restore the original function settings */
7381 REG_WR(bp, reg, orig_func);
7382 new_val = REG_RD(bp, reg);
7383 if (new_val != orig_func) {
7384 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7385 orig_func, new_val);
7386 BUG();
7387 }
7388}
7389
7390static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7391{
7392 if (CHIP_IS_E1H(bp))
7393 bnx2x_undi_int_disable_e1h(bp, func);
7394 else
7395 bnx2x_int_disable(bp);
7396}
7397
7398static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7399{
7400 u32 val;
7401
7402 /* Check if there is any driver already loaded */
7403 val = REG_RD(bp, MISC_REG_UNPREPARED);
7404 if (val == 0x1) {
7405 /* Check if it is the UNDI driver
7406 * UNDI driver initializes CID offset for normal bell to 0x7
7407 */
4a37fb66 7408 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7409 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7410 if (val == 0x7) {
7411 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7412 /* save our func */
34f80b04 7413 int func = BP_FUNC(bp);
7414 u32 swap_en;
7415 u32 swap_val;
34f80b04 7416
7417 /* clear the UNDI indication */
7418 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7419
7420 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7421
7422 /* try unload UNDI on port 0 */
7423 bp->func = 0;
7424 bp->fw_seq =
7425 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7426 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7427 reset_code = bnx2x_fw_command(bp, reset_code);
7428
7429 /* if UNDI is loaded on the other port */
7430 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7431
7432 /* send "DONE" for previous unload */
7433 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7434
7435 /* unload UNDI on port 1 */
34f80b04 7436 bp->func = 1;
7437 bp->fw_seq =
7438 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7439 DRV_MSG_SEQ_NUMBER_MASK);
7440 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7441
7442 bnx2x_fw_command(bp, reset_code);
7443 }
7444
7445 /* now it's safe to release the lock */
7446 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7447
f1ef27ef 7448 bnx2x_undi_int_disable(bp, func);
7449
7450 /* close input traffic and wait for it */
7451 /* Do not rcv packets to BRB */
7452 REG_WR(bp,
7453 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7454 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7455 /* Do not direct rcv packets that are not for MCP to
7456 * the BRB */
7457 REG_WR(bp,
7458 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7459 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7460 /* clear AEU */
7461 REG_WR(bp,
7462 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7463 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7464 msleep(10);
7465
7466 /* save NIG port swap info */
7467 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7468 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7469 /* reset device */
7470 REG_WR(bp,
7471 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7472 0xd3ffffff);
7473 REG_WR(bp,
7474 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7475 0x1403);
7476 /* take the NIG out of reset and restore swap values */
7477 REG_WR(bp,
7478 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7479 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7480 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7481 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7482
7483 /* send unload done to the MCP */
7484 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7485
7486 /* restore our func and fw_seq */
7487 bp->func = func;
7488 bp->fw_seq =
7489 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7490 DRV_MSG_SEQ_NUMBER_MASK);
7491
7492 } else
7493 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7494 }
7495}
7496
7497static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7498{
7499 u32 val, val2, val3, val4, id;
72ce58c3 7500 u16 pmc;
7501
7502 /* Get the chip revision id and number. */
7503 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7504 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7505 id = ((val & 0xffff) << 16);
7506 val = REG_RD(bp, MISC_REG_CHIP_REV);
7507 id |= ((val & 0xf) << 12);
7508 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7509 id |= ((val & 0xff) << 4);
5a40e08e 7510 val = REG_RD(bp, MISC_REG_BOND_ID);
7511 id |= (val & 0xf);
7512 bp->common.chip_id = id;
7513 bp->link_params.chip_id = bp->common.chip_id;
7514 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7515
7516 val = (REG_RD(bp, 0x2874) & 0x55);
7517 if ((bp->common.chip_id & 0x1) ||
7518 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7519 bp->flags |= ONE_PORT_FLAG;
7520 BNX2X_DEV_INFO("single port device\n");
7521 }
7522
7523 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7524 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7525 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7526 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7527 bp->common.flash_size, bp->common.flash_size);
7528
7529 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7530 bp->link_params.shmem_base = bp->common.shmem_base;
7531 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7532
7533 if (!bp->common.shmem_base ||
7534 (bp->common.shmem_base < 0xA0000) ||
7535 (bp->common.shmem_base >= 0xC0000)) {
7536 BNX2X_DEV_INFO("MCP not active\n");
7537 bp->flags |= NO_MCP_FLAG;
7538 return;
7539 }
7540
7541 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7542 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7543 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7544 BNX2X_ERR("BAD MCP validity signature\n");
7545
7546 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7547 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7548
7549 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7550 SHARED_HW_CFG_LED_MODE_MASK) >>
7551 SHARED_HW_CFG_LED_MODE_SHIFT);
7552
7553 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7554 bp->common.bc_ver = val;
7555 BNX2X_DEV_INFO("bc_ver %X\n", val);
7556 if (val < BNX2X_BC_VER) {
7557 /* for now only warn;
7558 * later we might need to enforce this */
7559 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7560 " please upgrade BC\n", BNX2X_BC_VER, val);
7561 }
7562
7563 if (BP_E1HVN(bp) == 0) {
7564 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7565 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7566 } else {
7567 /* no WOL capability for E1HVN != 0 */
7568 bp->flags |= NO_WOL_FLAG;
7569 }
7570 BNX2X_DEV_INFO("%sWoL capable\n",
7571 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7572
7573 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7574 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7575 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7576 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7577
7578 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7579 val, val2, val3, val4);
7580}
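/*
 * Sketch of the chip-id composition read from the MISC registers
 * above (chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3); the
 * field values arrive as arguments here instead of REG_RD() results.
 */
static u32 compose_chip_id(u32 num, u32 rev, u32 metal, u32 bond_id)
{
        return ((num & 0xffff) << 16) |         /* chip number */
               ((rev & 0xf) << 12) |            /* revision */
               ((metal & 0xff) << 4) |          /* metal layer */
               (bond_id & 0xf);                 /* bond option */
}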
7581
7582static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7583 u32 switch_cfg)
a2fbb9ea 7584{
34f80b04 7585 int port = BP_PORT(bp);
7586 u32 ext_phy_type;
7587
7588 switch (switch_cfg) {
7589 case SWITCH_CFG_1G:
7590 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7591
7592 ext_phy_type =
7593 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7594 switch (ext_phy_type) {
7595 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7596 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7597 ext_phy_type);
7598
7599 bp->port.supported |= (SUPPORTED_10baseT_Half |
7600 SUPPORTED_10baseT_Full |
7601 SUPPORTED_100baseT_Half |
7602 SUPPORTED_100baseT_Full |
7603 SUPPORTED_1000baseT_Full |
7604 SUPPORTED_2500baseX_Full |
7605 SUPPORTED_TP |
7606 SUPPORTED_FIBRE |
7607 SUPPORTED_Autoneg |
7608 SUPPORTED_Pause |
7609 SUPPORTED_Asym_Pause);
7610 break;
7611
7612 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7613 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7614 ext_phy_type);
7615
7616 bp->port.supported |= (SUPPORTED_10baseT_Half |
7617 SUPPORTED_10baseT_Full |
7618 SUPPORTED_100baseT_Half |
7619 SUPPORTED_100baseT_Full |
7620 SUPPORTED_1000baseT_Full |
7621 SUPPORTED_TP |
7622 SUPPORTED_FIBRE |
7623 SUPPORTED_Autoneg |
7624 SUPPORTED_Pause |
7625 SUPPORTED_Asym_Pause);
7626 break;
7627
7628 default:
7629 BNX2X_ERR("NVRAM config error. "
7630 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7631 bp->link_params.ext_phy_config);
7632 return;
7633 }
7634
7635 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7636 port*0x10);
7637 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7638 break;
7639
7640 case SWITCH_CFG_10G:
7641 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7642
7643 ext_phy_type =
7644 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7645 switch (ext_phy_type) {
7646 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7647 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7648 ext_phy_type);
7649
7650 bp->port.supported |= (SUPPORTED_10baseT_Half |
7651 SUPPORTED_10baseT_Full |
7652 SUPPORTED_100baseT_Half |
7653 SUPPORTED_100baseT_Full |
7654 SUPPORTED_1000baseT_Full |
7655 SUPPORTED_2500baseX_Full |
7656 SUPPORTED_10000baseT_Full |
7657 SUPPORTED_TP |
7658 SUPPORTED_FIBRE |
7659 SUPPORTED_Autoneg |
7660 SUPPORTED_Pause |
7661 SUPPORTED_Asym_Pause);
7662 break;
7663
7664 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7665 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 7666 ext_phy_type);
f1410647 7667
34f80b04 7668 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7669 SUPPORTED_1000baseT_Full |
34f80b04 7670 SUPPORTED_FIBRE |
589abe3a 7671 SUPPORTED_Autoneg |
7672 SUPPORTED_Pause |
7673 SUPPORTED_Asym_Pause);
7674 break;
7675
7676 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7677 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7678 ext_phy_type);
7679
34f80b04 7680 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7681 SUPPORTED_2500baseX_Full |
34f80b04 7682 SUPPORTED_1000baseT_Full |
7683 SUPPORTED_FIBRE |
7684 SUPPORTED_Autoneg |
7685 SUPPORTED_Pause |
7686 SUPPORTED_Asym_Pause);
7687 break;
7688
7689 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7690 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7691 ext_phy_type);
7692
7693 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7694 SUPPORTED_FIBRE |
7695 SUPPORTED_Pause |
7696 SUPPORTED_Asym_Pause);
7697 break;
7698
7699 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7700 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7701 ext_phy_type);
7702
7703 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7704 SUPPORTED_1000baseT_Full |
7705 SUPPORTED_FIBRE |
7706 SUPPORTED_Pause |
7707 SUPPORTED_Asym_Pause);
7708 break;
7709
7710 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7711 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7712 ext_phy_type);
7713
34f80b04 7714 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7715 SUPPORTED_1000baseT_Full |
34f80b04 7716 SUPPORTED_Autoneg |
589abe3a 7717 SUPPORTED_FIBRE |
7718 SUPPORTED_Pause |
7719 SUPPORTED_Asym_Pause);
7720 break;
7721
7722 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7723 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7724 ext_phy_type);
7725
7726 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7727 SUPPORTED_TP |
7728 SUPPORTED_Autoneg |
7729 SUPPORTED_Pause |
7730 SUPPORTED_Asym_Pause);
7731 break;
7732
7733 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7734 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7735 ext_phy_type);
7736
7737 bp->port.supported |= (SUPPORTED_10baseT_Half |
7738 SUPPORTED_10baseT_Full |
7739 SUPPORTED_100baseT_Half |
7740 SUPPORTED_100baseT_Full |
7741 SUPPORTED_1000baseT_Full |
7742 SUPPORTED_10000baseT_Full |
7743 SUPPORTED_TP |
7744 SUPPORTED_Autoneg |
7745 SUPPORTED_Pause |
7746 SUPPORTED_Asym_Pause);
7747 break;
7748
7749 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7750 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7751 bp->link_params.ext_phy_config);
7752 break;
7753
7754 default:
7755 BNX2X_ERR("NVRAM config error. "
7756 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7757 bp->link_params.ext_phy_config);
7758 return;
7759 }
7760
7761 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7762 port*0x18);
7763 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7764
7765 break;
7766
7767 default:
7768 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7769 bp->port.link_config);
7770 return;
7771 }
34f80b04 7772 bp->link_params.phy_addr = bp->port.phy_addr;
7773
7774 /* mask what we support according to speed_cap_mask */
7775 if (!(bp->link_params.speed_cap_mask &
7776 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7777 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7778
7779 if (!(bp->link_params.speed_cap_mask &
7780 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7781 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7782
7783 if (!(bp->link_params.speed_cap_mask &
7784 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7785 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7786
7787 if (!(bp->link_params.speed_cap_mask &
7788 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7789 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7790
7791 if (!(bp->link_params.speed_cap_mask &
7792 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7793 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7794 SUPPORTED_1000baseT_Full);
a2fbb9ea 7795
7796 if (!(bp->link_params.speed_cap_mask &
7797 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7798 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7799
7800 if (!(bp->link_params.speed_cap_mask &
7801 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7802 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7803
34f80b04 7804 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7805}
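/*
 * Sketch of the capability masking at the end of the function above:
 * each speed bit reported to ethtool survives only if the NVRAM
 * speed_cap_mask allows that speed.  One representative case is
 * shown; the driver repeats it for 10M/100M/1G/2.5G as well.
 */
static u32 mask_by_speed_caps(u32 supported, u32 speed_cap_mask)
{
        if (!(speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
                supported &= ~SUPPORTED_10000baseT_Full;

        return supported;
}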
7806
34f80b04 7807static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7808{
c18487ee 7809 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7810
34f80b04 7811 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7812 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7813 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7814 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7815 bp->port.advertising = bp->port.supported;
a2fbb9ea 7816 } else {
7817 u32 ext_phy_type =
7818 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7819
7820 if ((ext_phy_type ==
7821 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7822 (ext_phy_type ==
7823 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7824 /* force 10G, no AN */
c18487ee 7825 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7826 bp->port.advertising =
7827 (ADVERTISED_10000baseT_Full |
7828 ADVERTISED_FIBRE);
7829 break;
7830 }
7831 BNX2X_ERR("NVRAM config error. "
7832 "Invalid link_config 0x%x"
7833 " Autoneg not supported\n",
34f80b04 7834 bp->port.link_config);
7835 return;
7836 }
7837 break;
7838
7839 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7840 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7841 bp->link_params.req_line_speed = SPEED_10;
7842 bp->port.advertising = (ADVERTISED_10baseT_Full |
7843 ADVERTISED_TP);
7844 } else {
7845 BNX2X_ERR("NVRAM config error. "
7846 "Invalid link_config 0x%x"
7847 " speed_cap_mask 0x%x\n",
34f80b04 7848 bp->port.link_config,
c18487ee 7849 bp->link_params.speed_cap_mask);
7850 return;
7851 }
7852 break;
7853
7854 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7855 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7856 bp->link_params.req_line_speed = SPEED_10;
7857 bp->link_params.req_duplex = DUPLEX_HALF;
7858 bp->port.advertising = (ADVERTISED_10baseT_Half |
7859 ADVERTISED_TP);
7860 } else {
7861 BNX2X_ERR("NVRAM config error. "
7862 "Invalid link_config 0x%x"
7863 " speed_cap_mask 0x%x\n",
34f80b04 7864 bp->port.link_config,
c18487ee 7865 bp->link_params.speed_cap_mask);
7866 return;
7867 }
7868 break;
7869
7870 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7871 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7872 bp->link_params.req_line_speed = SPEED_100;
7873 bp->port.advertising = (ADVERTISED_100baseT_Full |
7874 ADVERTISED_TP);
7875 } else {
7876 BNX2X_ERR("NVRAM config error. "
7877 "Invalid link_config 0x%x"
7878 " speed_cap_mask 0x%x\n",
34f80b04 7879 bp->port.link_config,
c18487ee 7880 bp->link_params.speed_cap_mask);
7881 return;
7882 }
7883 break;
7884
7885 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7886 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7887 bp->link_params.req_line_speed = SPEED_100;
7888 bp->link_params.req_duplex = DUPLEX_HALF;
7889 bp->port.advertising = (ADVERTISED_100baseT_Half |
7890 ADVERTISED_TP);
7891 } else {
7892 BNX2X_ERR("NVRAM config error. "
7893 "Invalid link_config 0x%x"
7894 " speed_cap_mask 0x%x\n",
34f80b04 7895 bp->port.link_config,
c18487ee 7896 bp->link_params.speed_cap_mask);
7897 return;
7898 }
7899 break;
7900
7901 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7902 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7903 bp->link_params.req_line_speed = SPEED_1000;
7904 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7905 ADVERTISED_TP);
7906 } else {
7907 BNX2X_ERR("NVRAM config error. "
7908 "Invalid link_config 0x%x"
7909 " speed_cap_mask 0x%x\n",
34f80b04 7910 bp->port.link_config,
c18487ee 7911 bp->link_params.speed_cap_mask);
7912 return;
7913 }
7914 break;
7915
7916 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7917 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7918 bp->link_params.req_line_speed = SPEED_2500;
7919 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7920 ADVERTISED_TP);
7921 } else {
7922 BNX2X_ERR("NVRAM config error. "
7923 "Invalid link_config 0x%x"
7924 " speed_cap_mask 0x%x\n",
34f80b04 7925 bp->port.link_config,
c18487ee 7926 bp->link_params.speed_cap_mask);
7927 return;
7928 }
7929 break;
7930
7931 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7932 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7933 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7934 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7935 bp->link_params.req_line_speed = SPEED_10000;
7936 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7937 ADVERTISED_FIBRE);
7938 } else {
7939 BNX2X_ERR("NVRAM config error. "
7940 "Invalid link_config 0x%x"
7941 " speed_cap_mask 0x%x\n",
34f80b04 7942 bp->port.link_config,
c18487ee 7943 bp->link_params.speed_cap_mask);
7944 return;
7945 }
7946 break;
7947
7948 default:
7949 BNX2X_ERR("NVRAM config error. "
7950 "BAD link speed link_config 0x%x\n",
34f80b04 7951 bp->port.link_config);
c18487ee 7952 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7953 bp->port.advertising = bp->port.supported;
7954 break;
7955 }
a2fbb9ea 7956
7957 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7958 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7959 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7960 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7961 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7962
c18487ee 7963 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7964 " advertising 0x%x\n",
7965 bp->link_params.req_line_speed,
7966 bp->link_params.req_duplex,
34f80b04 7967 bp->link_params.req_flow_ctrl, bp->port.advertising);
7968}
7969
34f80b04 7970static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7971{
7972 int port = BP_PORT(bp);
7973 u32 val, val2;
589abe3a 7974 u32 config;
a2fbb9ea 7975
c18487ee 7976 bp->link_params.bp = bp;
34f80b04 7977 bp->link_params.port = port;
c18487ee 7978
c18487ee 7979 bp->link_params.serdes_config =
f1410647 7980 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7981 bp->link_params.lane_config =
a2fbb9ea 7982 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7983 bp->link_params.ext_phy_config =
7984 SHMEM_RD(bp,
7985 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7986 bp->link_params.speed_cap_mask =
7987 SHMEM_RD(bp,
7988 dev_info.port_hw_config[port].speed_capability_mask);
7989
34f80b04 7990 bp->port.link_config =
7991 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7992
7993 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
7994 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
7995 bp->link_params.feature_config_flags |=
7996 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
7997 else
7998 bp->link_params.feature_config_flags &=
7999 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8000
8001 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
8002 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
8003 " link_config 0x%08x\n",
8004 bp->link_params.serdes_config,
8005 bp->link_params.lane_config,
8006 bp->link_params.ext_phy_config,
34f80b04 8007 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8008
34f80b04 8009 bp->link_params.switch_cfg = (bp->port.link_config &
8010 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8011 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8012
8013 bnx2x_link_settings_requested(bp);
8014
8015 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8016 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8017 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8018 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8019 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8020 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8021 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8022 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8023 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8024 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8025}
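/*
 * Standalone sketch (not driver code) of the MAC reassembly above:
 * shmem stores the station address as an "upper" word holding bytes
 * 0-1 and a "lower" word holding bytes 2-5, unpacked MSB first.
 */
#include <stdint.h>

static void mac_from_shmem_words(uint32_t upper, uint32_t lower,
                                 uint8_t mac[6])
{
        mac[0] = (uint8_t)(upper >> 8);
        mac[1] = (uint8_t)upper;
        mac[2] = (uint8_t)(lower >> 24);
        mac[3] = (uint8_t)(lower >> 16);
        mac[4] = (uint8_t)(lower >> 8);
        mac[5] = (uint8_t)lower;
}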
8026
8027static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8028{
8029 int func = BP_FUNC(bp);
8030 u32 val, val2;
8031 int rc = 0;
a2fbb9ea 8032
34f80b04 8033 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8034
8035 bp->e1hov = 0;
8036 bp->e1hmf = 0;
8037 if (CHIP_IS_E1H(bp)) {
8038 bp->mf_config =
8039 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8040
8041 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8042 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 8043 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 8044
8045 bp->e1hov = val;
8046 bp->e1hmf = 1;
8047 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8048 "(0x%04x)\n",
8049 func, bp->e1hov, bp->e1hov);
8050 } else {
8051 BNX2X_DEV_INFO("Single function mode\n");
8052 if (BP_E1HVN(bp)) {
8053 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8054 " aborting\n", func);
8055 rc = -EPERM;
8056 }
8057 }
8058 }
a2fbb9ea 8059
34f80b04
EG
8060 if (!BP_NOMCP(bp)) {
8061 bnx2x_get_port_hwinfo(bp);
8062
8063 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8064 DRV_MSG_SEQ_NUMBER_MASK);
8065 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8066 }
8067
8068 if (IS_E1HMF(bp)) {
8069 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8070 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8071 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8072 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8073 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8074 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8075 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8076 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8077 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8078 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8079 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8080 ETH_ALEN);
8081 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8082 ETH_ALEN);
a2fbb9ea 8083 }
8084
8085 return rc;
8086 }
8087
8088 if (BP_NOMCP(bp)) {
8089 /* only supposed to happen on emulation/FPGA */
33471629 8090 BNX2X_ERR("warning random MAC workaround active\n");
8091 random_ether_addr(bp->dev->dev_addr);
8092 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8093 }
a2fbb9ea 8094
34f80b04
EG
8095 return rc;
8096}
8097
8098static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8099{
8100 int func = BP_FUNC(bp);
87942b46 8101 int timer_interval;
34f80b04
EG
8102 int rc;
8103
8104 /* Disable interrupt handling until HW is initialized */
8105 atomic_set(&bp->intr_sem, 1);
8106
34f80b04 8107 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8108
1cf167f2 8109 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8110 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8111
8112 rc = bnx2x_get_hwinfo(bp);
8113
8114 /* need to reset chip if undi was active */
8115 if (!BP_NOMCP(bp))
8116 bnx2x_undi_unload(bp);
8117
8118 if (CHIP_REV_IS_FPGA(bp))
8119 printk(KERN_ERR PFX "FPGA detected\n");
8120
8121 if (BP_NOMCP(bp) && (func == 0))
8122 printk(KERN_ERR PFX
8123 "MCP disabled, must load devices in order!\n");
8124
555f6c78 8125 /* Set multi queue mode */
8126 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8127 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8128 printk(KERN_ERR PFX
8badd27a 8129 "Multi disabled since int_mode requested is not MSI-X\n");
8130 multi_mode = ETH_RSS_MODE_DISABLED;
8131 }
8132 bp->multi_mode = multi_mode;
8133
8134
8135 /* Set TPA flags */
8136 if (disable_tpa) {
8137 bp->flags &= ~TPA_ENABLE_FLAG;
8138 bp->dev->features &= ~NETIF_F_LRO;
8139 } else {
8140 bp->flags |= TPA_ENABLE_FLAG;
8141 bp->dev->features |= NETIF_F_LRO;
8142 }
8143
8144
8145 bp->tx_ring_size = MAX_TX_AVAIL;
8146 bp->rx_ring_size = MAX_RX_AVAIL;
8147
8148 bp->rx_csum = 1;
8149
8150 bp->tx_ticks = 50;
8151 bp->rx_ticks = 25;
8152
8153 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8154 bp->current_interval = (poll ? poll : timer_interval);
8155
8156 init_timer(&bp->timer);
8157 bp->timer.expires = jiffies + bp->current_interval;
8158 bp->timer.data = (unsigned long) bp;
8159 bp->timer.function = bnx2x_timer;
8160
8161 return rc;
8162}
8163
8164/*
8165 * ethtool service functions
8166 */
8167
8168/* All ethtool functions called with rtnl_lock */
8169
8170static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8171{
8172 struct bnx2x *bp = netdev_priv(dev);
8173
8174 cmd->supported = bp->port.supported;
8175 cmd->advertising = bp->port.advertising;
8176
8177 if (netif_carrier_ok(dev)) {
8178 cmd->speed = bp->link_vars.line_speed;
8179 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8180 } else {
8181 cmd->speed = bp->link_params.req_line_speed;
8182 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8183 }
8184 if (IS_E1HMF(bp)) {
8185 u16 vn_max_rate;
8186
8187 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8188 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8189 if (vn_max_rate < cmd->speed)
8190 cmd->speed = vn_max_rate;
8191 }
a2fbb9ea 8192
8193 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8194 u32 ext_phy_type =
8195 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8196
8197 switch (ext_phy_type) {
8198 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8199 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8200 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8201 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8202 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8203 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8204 cmd->port = PORT_FIBRE;
8205 break;
8206
8207 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8208 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8209 cmd->port = PORT_TP;
8210 break;
8211
8212 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8213 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8214 bp->link_params.ext_phy_config);
8215 break;
8216
8217 default:
8218 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8219 bp->link_params.ext_phy_config);
8220 break;
8221 }
8222 } else
a2fbb9ea 8223 cmd->port = PORT_TP;
a2fbb9ea 8224
34f80b04 8225 cmd->phy_address = bp->port.phy_addr;
8226 cmd->transceiver = XCVR_INTERNAL;
8227
c18487ee 8228 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8229 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8230 else
a2fbb9ea 8231 cmd->autoneg = AUTONEG_DISABLE;
8232
8233 cmd->maxtxpkt = 0;
8234 cmd->maxrxpkt = 0;
8235
8236 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8237 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8238 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8239 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8240 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8241 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8242 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8243
8244 return 0;
8245}
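/*
 * Sketch of the multi-function speed clamp above (illustrative
 * helper): the per-VN maximum bandwidth is stored in mf_config in
 * units of 100 Mbps, and the speed reported to ethtool never
 * exceeds it.
 */
static u16 clamp_to_vn_max_rate(u16 speed, u32 mf_config)
{
        u16 vn_max_rate = ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
                           FUNC_MF_CFG_MAX_BW_SHIFT) * 100;

        return (vn_max_rate < speed) ? vn_max_rate : speed;
}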
8246
8247static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8248{
8249 struct bnx2x *bp = netdev_priv(dev);
8250 u32 advertising;
8251
8252 if (IS_E1HMF(bp))
8253 return 0;
8254
8255 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8256 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8257 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8258 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8259 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8260 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8261 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8262
a2fbb9ea 8263 if (cmd->autoneg == AUTONEG_ENABLE) {
8264 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8265 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8266 return -EINVAL;
f1410647 8267 }
8268
8269 /* advertise the requested speed and duplex if supported */
34f80b04 8270 cmd->advertising &= bp->port.supported;
a2fbb9ea 8271
8272 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8273 bp->link_params.req_duplex = DUPLEX_FULL;
8274 bp->port.advertising |= (ADVERTISED_Autoneg |
8275 cmd->advertising);
8276
8277 } else { /* forced speed */
8278 /* advertise the requested speed and duplex if supported */
8279 switch (cmd->speed) {
8280 case SPEED_10:
8281 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8282 if (!(bp->port.supported &
8283 SUPPORTED_10baseT_Full)) {
8284 DP(NETIF_MSG_LINK,
8285 "10M full not supported\n");
a2fbb9ea 8286 return -EINVAL;
f1410647 8287 }
8288
8289 advertising = (ADVERTISED_10baseT_Full |
8290 ADVERTISED_TP);
8291 } else {
34f80b04 8292 if (!(bp->port.supported &
8293 SUPPORTED_10baseT_Half)) {
8294 DP(NETIF_MSG_LINK,
8295 "10M half not supported\n");
a2fbb9ea 8296 return -EINVAL;
f1410647 8297 }
8298
8299 advertising = (ADVERTISED_10baseT_Half |
8300 ADVERTISED_TP);
8301 }
8302 break;
8303
8304 case SPEED_100:
8305 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8306 if (!(bp->port.supported &
8307 SUPPORTED_100baseT_Full)) {
8308 DP(NETIF_MSG_LINK,
8309 "100M full not supported\n");
a2fbb9ea 8310 return -EINVAL;
f1410647 8311 }
8312
8313 advertising = (ADVERTISED_100baseT_Full |
8314 ADVERTISED_TP);
8315 } else {
34f80b04 8316 if (!(bp->port.supported &
8317 SUPPORTED_100baseT_Half)) {
8318 DP(NETIF_MSG_LINK,
8319 "100M half not supported\n");
a2fbb9ea 8320 return -EINVAL;
f1410647 8321 }
8322
8323 advertising = (ADVERTISED_100baseT_Half |
8324 ADVERTISED_TP);
8325 }
8326 break;
8327
8328 case SPEED_1000:
8329 if (cmd->duplex != DUPLEX_FULL) {
8330 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8331 return -EINVAL;
f1410647 8332 }
a2fbb9ea 8333
34f80b04 8334 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8335 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8336 return -EINVAL;
f1410647 8337 }
8338
8339 advertising = (ADVERTISED_1000baseT_Full |
8340 ADVERTISED_TP);
8341 break;
8342
8343 case SPEED_2500:
8344 if (cmd->duplex != DUPLEX_FULL) {
8345 DP(NETIF_MSG_LINK,
8346 "2.5G half not supported\n");
a2fbb9ea 8347 return -EINVAL;
f1410647 8348 }
a2fbb9ea 8349
34f80b04 8350 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8351 DP(NETIF_MSG_LINK,
8352 "2.5G full not supported\n");
a2fbb9ea 8353 return -EINVAL;
f1410647 8354 }
a2fbb9ea 8355
f1410647 8356 advertising = (ADVERTISED_2500baseX_Full |
8357 ADVERTISED_TP);
8358 break;
8359
8360 case SPEED_10000:
8361 if (cmd->duplex != DUPLEX_FULL) {
8362 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8363 return -EINVAL;
f1410647 8364 }
a2fbb9ea 8365
34f80b04 8366 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8367 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8368 return -EINVAL;
f1410647 8369 }
8370
8371 advertising = (ADVERTISED_10000baseT_Full |
8372 ADVERTISED_FIBRE);
8373 break;
8374
8375 default:
f1410647 8376 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8377 return -EINVAL;
8378 }
8379
8380 bp->link_params.req_line_speed = cmd->speed;
8381 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8382 bp->port.advertising = advertising;
8383 }
8384
c18487ee 8385 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8386 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8387 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8388 bp->port.advertising);
a2fbb9ea 8389
34f80b04 8390 if (netif_running(dev)) {
bb2a0f7a 8391 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8392 bnx2x_link_set(bp);
8393 }
8394
8395 return 0;
8396}
8397
8398#define PHY_FW_VER_LEN 10
8399
8400static void bnx2x_get_drvinfo(struct net_device *dev,
8401 struct ethtool_drvinfo *info)
8402{
8403 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8404 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8405
8406 strcpy(info->driver, DRV_MODULE_NAME);
8407 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8408
8409 phy_fw_ver[0] = '\0';
34f80b04 8410 if (bp->port.pmf) {
4a37fb66 8411 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8412 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8413 (bp->state != BNX2X_STATE_CLOSED),
8414 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8415 bnx2x_release_phy_lock(bp);
34f80b04 8416 }
c18487ee 8417
f0e53a84
EG
8418 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8419 (bp->common.bc_ver & 0xff0000) >> 16,
8420 (bp->common.bc_ver & 0xff00) >> 8,
8421 (bp->common.bc_ver & 0xff),
8422 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8423 strcpy(info->bus_info, pci_name(bp->pdev));
8424 info->n_stats = BNX2X_NUM_STATS;
8425 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8426 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8427 info->regdump_len = 0;
8428}
8429
8430static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8431{
8432 struct bnx2x *bp = netdev_priv(dev);
8433
8434 if (bp->flags & NO_WOL_FLAG) {
8435 wol->supported = 0;
8436 wol->wolopts = 0;
8437 } else {
8438 wol->supported = WAKE_MAGIC;
8439 if (bp->wol)
8440 wol->wolopts = WAKE_MAGIC;
8441 else
8442 wol->wolopts = 0;
8443 }
8444 memset(&wol->sopass, 0, sizeof(wol->sopass));
8445}
8446
8447static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8448{
8449 struct bnx2x *bp = netdev_priv(dev);
8450
8451 if (wol->wolopts & ~WAKE_MAGIC)
8452 return -EINVAL;
8453
8454 if (wol->wolopts & WAKE_MAGIC) {
8455 if (bp->flags & NO_WOL_FLAG)
8456 return -EINVAL;
8457
8458 bp->wol = 1;
8459 } else
8460 bp->wol = 0;
8461
8462 return 0;
8463}
8464
8465static u32 bnx2x_get_msglevel(struct net_device *dev)
8466{
8467 struct bnx2x *bp = netdev_priv(dev);
8468
8469 return bp->msglevel;
8470}
8471
8472static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8473{
8474 struct bnx2x *bp = netdev_priv(dev);
8475
8476 if (capable(CAP_NET_ADMIN))
8477 bp->msglevel = level;
8478}
8479
8480static int bnx2x_nway_reset(struct net_device *dev)
8481{
8482 struct bnx2x *bp = netdev_priv(dev);
8483
8484 if (!bp->port.pmf)
8485 return 0;
8486
8487 if (netif_running(dev)) {
8488 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8489 bnx2x_link_set(bp);
8490 }
8491
8492 return 0;
8493}
8494
8495static int bnx2x_get_eeprom_len(struct net_device *dev)
8496{
8497 struct bnx2x *bp = netdev_priv(dev);
8498
8499 return bp->common.flash_size;
8500}
8501
8502static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8503{
8504 int port = BP_PORT(bp);
8505 int count, i;
8506 u32 val = 0;
8507
8508 /* adjust timeout for emulation/FPGA */
8509 count = NVRAM_TIMEOUT_COUNT;
8510 if (CHIP_REV_IS_SLOW(bp))
8511 count *= 100;
8512
8513 /* request access to nvram interface */
8514 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8515 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8516
8517 for (i = 0; i < count*10; i++) {
8518 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8519 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8520 break;
8521
8522 udelay(5);
8523 }
8524
8525 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8526 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8527 return -EBUSY;
8528 }
8529
8530 return 0;
8531}
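/* Editor's note (illustrative sketch, not in the original source): every
 * NVRAM transaction in this file brackets its dword accesses as
 *
 *	rc = bnx2x_acquire_nvram_lock(bp);
 *	if (rc)
 *		return rc;
 *	bnx2x_enable_nvram_access(bp);
 *	... bnx2x_nvram_read_dword()/bnx2x_nvram_write_dword() ...
 *	bnx2x_disable_nvram_access(bp);
 *	bnx2x_release_nvram_lock(bp);
 *
 * The poll loop above retries count*10 times with a 5us delay, i.e. up to
 * ~50us per NVRAM_TIMEOUT_COUNT unit before failing with -EBUSY.
 */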
8532
8533static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8534{
8535 int port = BP_PORT(bp);
8536 int count, i;
8537 u32 val = 0;
8538
8539 /* adjust timeout for emulation/FPGA */
8540 count = NVRAM_TIMEOUT_COUNT;
8541 if (CHIP_REV_IS_SLOW(bp))
8542 count *= 100;
8543
8544 /* relinquish nvram interface */
8545 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8546 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8547
8548 for (i = 0; i < count*10; i++) {
8549 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8550 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8551 break;
8552
8553 udelay(5);
8554 }
8555
8556 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8557 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8558 return -EBUSY;
8559 }
8560
8561 return 0;
8562}
8563
8564static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8565{
8566 u32 val;
8567
8568 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8569
8570 /* enable both bits, even on read */
8571 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8572 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8573 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8574}
8575
8576static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8577{
8578 u32 val;
8579
8580 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8581
8582 /* disable both bits, even after read */
8583 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8584 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8585 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8586}
8587
8588static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8589 u32 cmd_flags)
8590{
8591 int count, i, rc;
8592 u32 val;
8593
8594 /* build the command word */
8595 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8596
8597 /* need to clear DONE bit separately */
8598 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8599
8600 /* address of the NVRAM to read from */
8601 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8602 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8603
8604 /* issue a read command */
8605 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8606
8607 /* adjust timeout for emulation/FPGA */
8608 count = NVRAM_TIMEOUT_COUNT;
8609 if (CHIP_REV_IS_SLOW(bp))
8610 count *= 100;
8611
8612 /* wait for completion */
8613 *ret_val = 0;
8614 rc = -EBUSY;
8615 for (i = 0; i < count; i++) {
8616 udelay(5);
8617 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8618
8619 if (val & MCPR_NVM_COMMAND_DONE) {
8620 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8621 /* we read nvram data in cpu order
8622 * but ethtool sees it as an array of bytes
8623 * converting to big-endian will do the work */
8624 val = cpu_to_be32(val);
8625 *ret_val = val;
8626 rc = 0;
8627 break;
8628 }
8629 }
8630
8631 return rc;
8632}
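/* Editor's note (worked example, assuming a little-endian CPU): if
 * MCP_REG_MCPR_NVM_READ returns 0xAABBCCDD in CPU order, cpu_to_be32()
 * stores it as the byte sequence AA BB CC DD, so the caller's byte
 * buffer matches the flash byte order that ethtool expects.
 */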
8633
8634static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8635 int buf_size)
8636{
8637 int rc;
8638 u32 cmd_flags;
8639 u32 val;
8640
8641 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8642 DP(BNX2X_MSG_NVM,
8643 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8644 offset, buf_size);
8645 return -EINVAL;
8646 }
8647
8648 if (offset + buf_size > bp->common.flash_size) {
8649 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8650 " buf_size (0x%x) > flash_size (0x%x)\n",
8651 offset, buf_size, bp->common.flash_size);
8652 return -EINVAL;
8653 }
8654
8655 /* request access to nvram interface */
8656 rc = bnx2x_acquire_nvram_lock(bp);
8657 if (rc)
8658 return rc;
8659
8660 /* enable access to nvram interface */
8661 bnx2x_enable_nvram_access(bp);
8662
8663 /* read the first word(s) */
8664 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8665 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8666 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8667 memcpy(ret_buf, &val, 4);
8668
8669 /* advance to the next dword */
8670 offset += sizeof(u32);
8671 ret_buf += sizeof(u32);
8672 buf_size -= sizeof(u32);
8673 cmd_flags = 0;
8674 }
8675
8676 if (rc == 0) {
8677 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8678 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8679 memcpy(ret_buf, &val, 4);
8680 }
8681
8682 /* disable access to nvram interface */
8683 bnx2x_disable_nvram_access(bp);
8684 bnx2x_release_nvram_lock(bp);
8685
8686 return rc;
8687}
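/* Editor's note (usage sketch): bnx2x_test_nvram() below uses this
 * helper to pull the 4-byte bootstrap magic, e.g.
 *
 *	rc = bnx2x_nvram_read(bp, 0, data, 4);
 *
 * Offset and length must both be dword aligned; the FIRST/LAST command
 * flags mark the burst boundaries for the MCP.
 */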
8688
8689static int bnx2x_get_eeprom(struct net_device *dev,
8690 struct ethtool_eeprom *eeprom, u8 *eebuf)
8691{
8692 struct bnx2x *bp = netdev_priv(dev);
8693 int rc;
8694
8695 if (!netif_running(dev))
8696 return -EAGAIN;
8697
8698 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8699 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8700 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8701 eeprom->len, eeprom->len);
8702
8703 /* parameters already validated in ethtool_get_eeprom */
8704
8705 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8706
8707 return rc;
8708}
8709
8710static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8711 u32 cmd_flags)
8712{
8713 int count, i, rc;
8714
8715 /* build the command word */
8716 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8717
8718 /* need to clear DONE bit separately */
8719 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8720
8721 /* write the data */
8722 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8723
8724 /* address of the NVRAM to write to */
8725 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8726 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8727
8728 /* issue the write command */
8729 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8730
8731 /* adjust timeout for emulation/FPGA */
8732 count = NVRAM_TIMEOUT_COUNT;
8733 if (CHIP_REV_IS_SLOW(bp))
8734 count *= 100;
8735
8736 /* wait for completion */
8737 rc = -EBUSY;
8738 for (i = 0; i < count; i++) {
8739 udelay(5);
8740 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8741 if (val & MCPR_NVM_COMMAND_DONE) {
8742 rc = 0;
8743 break;
8744 }
8745 }
8746
8747 return rc;
8748}
8749
8750#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8751
8752static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8753 int buf_size)
8754{
8755 int rc;
8756 u32 cmd_flags;
8757 u32 align_offset;
8758 u32 val;
8759
8760 if (offset + buf_size > bp->common.flash_size) {
8761 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8762 " buf_size (0x%x) > flash_size (0x%x)\n",
8763 offset, buf_size, bp->common.flash_size);
8764 return -EINVAL;
8765 }
8766
8767 /* request access to nvram interface */
8768 rc = bnx2x_acquire_nvram_lock(bp);
8769 if (rc)
8770 return rc;
8771
8772 /* enable access to nvram interface */
8773 bnx2x_enable_nvram_access(bp);
8774
8775 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8776 align_offset = (offset & ~0x03);
8777 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8778
8779 if (rc == 0) {
8780 val &= ~(0xff << BYTE_OFFSET(offset));
8781 val |= (*data_buf << BYTE_OFFSET(offset));
8782
8783 /* nvram data is returned as an array of bytes
8784 * convert it back to cpu order */
8785 val = be32_to_cpu(val);
8786
8787 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8788 cmd_flags);
8789 }
8790
8791 /* disable access to nvram interface */
8792 bnx2x_disable_nvram_access(bp);
8793 bnx2x_release_nvram_lock(bp);
8794
8795 return rc;
8796}
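/* Editor's note (sketch): a single-byte write is a read-modify-write of
 * the aligned dword - read (offset & ~3), replace the byte selected by
 * BYTE_OFFSET(offset) (8 * (offset & 3) bit positions into the
 * big-endian image), convert back with be32_to_cpu() and write the
 * dword out again.
 */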
8797
8798static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8799 int buf_size)
8800{
8801 int rc;
8802 u32 cmd_flags;
8803 u32 val;
8804 u32 written_so_far;
8805
8806 if (buf_size == 1) /* ethtool */
8807 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8808
8809 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8810 DP(BNX2X_MSG_NVM,
8811 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8812 offset, buf_size);
8813 return -EINVAL;
8814 }
8815
8816 if (offset + buf_size > bp->common.flash_size) {
8817 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8818 " buf_size (0x%x) > flash_size (0x%x)\n",
8819 offset, buf_size, bp->common.flash_size);
8820 return -EINVAL;
8821 }
8822
8823 /* request access to nvram interface */
8824 rc = bnx2x_acquire_nvram_lock(bp);
8825 if (rc)
8826 return rc;
8827
8828 /* enable access to nvram interface */
8829 bnx2x_enable_nvram_access(bp);
8830
8831 written_so_far = 0;
8832 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8833 while ((written_so_far < buf_size) && (rc == 0)) {
8834 if (written_so_far == (buf_size - sizeof(u32)))
8835 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8836 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8837 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8838 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8839 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8840
8841 memcpy(&val, data_buf, 4);
8842
8843 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8844
8845 /* advance to the next dword */
8846 offset += sizeof(u32);
8847 data_buf += sizeof(u32);
8848 written_so_far += sizeof(u32);
8849 cmd_flags = 0;
8850 }
8851
8852 /* disable access to nvram interface */
8853 bnx2x_disable_nvram_access(bp);
8854 bnx2x_release_nvram_lock(bp);
8855
8856 return rc;
8857}
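/* Editor's note (sketch, NVRAM_PAGE_SIZE is defined elsewhere in the
 * driver): the loop re-arms MCPR_NVM_COMMAND_FIRST at every page start
 * and MCPR_NVM_COMMAND_LAST at every page end, so a buffer crossing a
 * page boundary is issued as one FIRST/LAST burst per page.
 */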
8858
8859static int bnx2x_set_eeprom(struct net_device *dev,
8860 struct ethtool_eeprom *eeprom, u8 *eebuf)
8861{
8862 struct bnx2x *bp = netdev_priv(dev);
8863 int rc;
8864
8865 if (!netif_running(dev))
8866 return -EAGAIN;
8867
8868 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8869 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8870 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8871 eeprom->len, eeprom->len);
8872
8873 /* parameters already validated in ethtool_set_eeprom */
8874
8875 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8876 if (eeprom->magic == 0x00504859)
8877 if (bp->port.pmf) {
8878
8879 bnx2x_acquire_phy_lock(bp);
8880 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8881 bp->link_params.ext_phy_config,
8882 (bp->state != BNX2X_STATE_CLOSED),
8883 eebuf, eeprom->len);
8884 if ((bp->state == BNX2X_STATE_OPEN) ||
8885 (bp->state == BNX2X_STATE_DISABLED)) {
8886 rc |= bnx2x_link_reset(&bp->link_params,
8887 &bp->link_vars, 1);
8888 rc |= bnx2x_phy_init(&bp->link_params,
8889 &bp->link_vars);
8890 }
8891 bnx2x_release_phy_lock(bp);
8892
8893 } else /* Only the PMF can access the PHY */
8894 return -EINVAL;
8895 else
8896 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8897
8898 return rc;
8899}
8900
8901static int bnx2x_get_coalesce(struct net_device *dev,
8902 struct ethtool_coalesce *coal)
8903{
8904 struct bnx2x *bp = netdev_priv(dev);
8905
8906 memset(coal, 0, sizeof(struct ethtool_coalesce));
8907
8908 coal->rx_coalesce_usecs = bp->rx_ticks;
8909 coal->tx_coalesce_usecs = bp->tx_ticks;
8910
8911 return 0;
8912}
8913
8914static int bnx2x_set_coalesce(struct net_device *dev,
8915 struct ethtool_coalesce *coal)
8916{
8917 struct bnx2x *bp = netdev_priv(dev);
8918
8919 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8920 if (bp->rx_ticks > 3000)
8921 bp->rx_ticks = 3000;
8922
8923 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8924 if (bp->tx_ticks > 0x3000)
8925 bp->tx_ticks = 0x3000;
8926
8927 if (netif_running(dev))
8928 bnx2x_update_coalesce(bp);
8929
8930 return 0;
8931}
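/* Editor's note: the clamp bounds above are as in the original code -
 * rx_ticks is limited to 3000 (decimal) while tx_ticks is limited to
 * 0x3000 (12288); the asymmetry is preserved as-is.
 */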
8932
8933static void bnx2x_get_ringparam(struct net_device *dev,
8934 struct ethtool_ringparam *ering)
8935{
8936 struct bnx2x *bp = netdev_priv(dev);
8937
8938 ering->rx_max_pending = MAX_RX_AVAIL;
8939 ering->rx_mini_max_pending = 0;
8940 ering->rx_jumbo_max_pending = 0;
8941
8942 ering->rx_pending = bp->rx_ring_size;
8943 ering->rx_mini_pending = 0;
8944 ering->rx_jumbo_pending = 0;
8945
8946 ering->tx_max_pending = MAX_TX_AVAIL;
8947 ering->tx_pending = bp->tx_ring_size;
8948}
8949
8950static int bnx2x_set_ringparam(struct net_device *dev,
8951 struct ethtool_ringparam *ering)
8952{
8953 struct bnx2x *bp = netdev_priv(dev);
8954 int rc = 0;
8955
8956 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8957 (ering->tx_pending > MAX_TX_AVAIL) ||
8958 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8959 return -EINVAL;
8960
8961 bp->rx_ring_size = ering->rx_pending;
8962 bp->tx_ring_size = ering->tx_pending;
8963
8964 if (netif_running(dev)) {
8965 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8966 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8967 }
8968
8969 return rc;
8970}
8971
8972static void bnx2x_get_pauseparam(struct net_device *dev,
8973 struct ethtool_pauseparam *epause)
8974{
8975 struct bnx2x *bp = netdev_priv(dev);
8976
8977 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8978 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8979
8980 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8981 BNX2X_FLOW_CTRL_RX);
8982 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8983 BNX2X_FLOW_CTRL_TX);
8984
8985 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8986 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8987 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8988}
8989
8990static int bnx2x_set_pauseparam(struct net_device *dev,
8991 struct ethtool_pauseparam *epause)
8992{
8993 struct bnx2x *bp = netdev_priv(dev);
8994
8995 if (IS_E1HMF(bp))
8996 return 0;
8997
8998 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8999 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9000 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9001
9002 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9003
9004 if (epause->rx_pause)
9005 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9006
9007 if (epause->tx_pause)
9008 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9009
9010 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9011 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9012
9013 if (epause->autoneg) {
9014 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9015 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9016 return -EINVAL;
9017 }
9018
9019 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9020 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9021 }
9022
9023 DP(NETIF_MSG_LINK,
9024 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9025
9026 if (netif_running(dev)) {
9027 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9028 bnx2x_link_set(bp);
9029 }
9030
9031 return 0;
9032}
9033
9034static int bnx2x_set_flags(struct net_device *dev, u32 data)
9035{
9036 struct bnx2x *bp = netdev_priv(dev);
9037 int changed = 0;
9038 int rc = 0;
9039
9040 /* TPA requires Rx CSUM offloading */
9041 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9042 if (!(dev->features & NETIF_F_LRO)) {
9043 dev->features |= NETIF_F_LRO;
9044 bp->flags |= TPA_ENABLE_FLAG;
9045 changed = 1;
9046 }
9047
9048 } else if (dev->features & NETIF_F_LRO) {
9049 dev->features &= ~NETIF_F_LRO;
9050 bp->flags &= ~TPA_ENABLE_FLAG;
9051 changed = 1;
9052 }
9053
9054 if (changed && netif_running(dev)) {
9055 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9056 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9057 }
9058
9059 return rc;
9060}
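/* Editor's note (usage sketch): this is the ethtool ETH_FLAG_LRO hook
 * (e.g. "ethtool -K <if> lro on"); toggling TPA takes a full
 * unload/load cycle because the Rx rings must be rebuilt.
 */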
9061
9062static u32 bnx2x_get_rx_csum(struct net_device *dev)
9063{
9064 struct bnx2x *bp = netdev_priv(dev);
9065
9066 return bp->rx_csum;
9067}
9068
9069static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9070{
9071 struct bnx2x *bp = netdev_priv(dev);
9072 int rc = 0;
9073
9074 bp->rx_csum = data;
9075
9076 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9077 TPA'ed packets will be discarded due to wrong TCP CSUM */
9078 if (!data) {
9079 u32 flags = ethtool_op_get_flags(dev);
9080
9081 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9082 }
9083
9084 return rc;
9085}
9086
9087static int bnx2x_set_tso(struct net_device *dev, u32 data)
9088{
9089 if (data) {
9090 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9091 dev->features |= NETIF_F_TSO6;
9092 } else {
9093 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9094 dev->features &= ~NETIF_F_TSO6;
9095 }
9096
9097 return 0;
9098}
9099
9100static const struct {
9101 char string[ETH_GSTRING_LEN];
9102} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9103 { "register_test (offline)" },
9104 { "memory_test (offline)" },
9105 { "loopback_test (offline)" },
9106 { "nvram_test (online)" },
9107 { "interrupt_test (online)" },
9108 { "link_test (online)" },
9109 { "idle check (online)" }
9110};
9111
9112static int bnx2x_self_test_count(struct net_device *dev)
9113{
9114 return BNX2X_NUM_TESTS;
9115}
9116
9117static int bnx2x_test_registers(struct bnx2x *bp)
9118{
9119 int idx, i, rc = -ENODEV;
9120 u32 wr_val = 0;
9121 int port = BP_PORT(bp);
9122 static const struct {
9123 u32 offset0;
9124 u32 offset1;
9125 u32 mask;
9126 } reg_tbl[] = {
9127/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9128 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9129 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9130 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9131 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9132 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9133 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9134 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9135 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9136 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9137/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9138 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9139 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9140 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9141 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9142 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9143 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9144 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9145 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9146 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9147/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9148 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9149 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9150 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9151 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9152 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9153 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9154 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9155 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9156 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9157/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9158 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9159 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9160 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9161 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9162 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9163 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9164 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9165
9166 { 0xffffffff, 0, 0x00000000 }
9167 };
9168
9169 if (!netif_running(bp->dev))
9170 return rc;
9171
9172 /* Repeat the test twice:
9173 First by writing 0x00000000, second by writing 0xffffffff */
9174 for (idx = 0; idx < 2; idx++) {
9175
9176 switch (idx) {
9177 case 0:
9178 wr_val = 0;
9179 break;
9180 case 1:
9181 wr_val = 0xffffffff;
9182 break;
9183 }
9184
9185 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9186 u32 offset, mask, save_val, val;
9187
9188 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9189 mask = reg_tbl[i].mask;
9190
9191 save_val = REG_RD(bp, offset);
9192
9193 REG_WR(bp, offset, wr_val);
9194 val = REG_RD(bp, offset);
9195
9196 /* Restore the original register's value */
9197 REG_WR(bp, offset, save_val);
9198
9199 /* verify that the value is as expected */
9200 if ((val & mask) != (wr_val & mask))
9201 goto test_reg_exit;
9202 }
9203 }
9204
9205 rc = 0;
9206
9207test_reg_exit:
9208 return rc;
9209}
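/* Editor's note: each reg_tbl entry is { offset0, offset1, mask } - the
 * register tested is offset0 + port*offset1 and mask selects the
 * writable bits compared after the 0x00000000 and 0xffffffff writes;
 * e.g. for port 1 the first entry tests
 * BRB1_REG_PAUSE_LOW_THRESHOLD_0 + 4.
 */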
9210
9211static int bnx2x_test_memory(struct bnx2x *bp)
9212{
9213 int i, j, rc = -ENODEV;
9214 u32 val;
9215 static const struct {
9216 u32 offset;
9217 int size;
9218 } mem_tbl[] = {
9219 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9220 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9221 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9222 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9223 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9224 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9225 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9226
9227 { 0xffffffff, 0 }
9228 };
9229 static const struct {
9230 char *name;
9231 u32 offset;
9232 u32 e1_mask;
9233 u32 e1h_mask;
9234 } prty_tbl[] = {
9235 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9236 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9237 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9238 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9239 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9240 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9241
9242 { NULL, 0xffffffff, 0, 0 }
9243 };
9244
9245 if (!netif_running(bp->dev))
9246 return rc;
9247
9248 /* Go through all the memories */
9249 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9250 for (j = 0; j < mem_tbl[i].size; j++)
9251 REG_RD(bp, mem_tbl[i].offset + j*4);
9252
9253 /* Check the parity status */
9254 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9255 val = REG_RD(bp, prty_tbl[i].offset);
9256 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9257 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9258 DP(NETIF_MSG_HW,
9259 "%s is 0x%x\n", prty_tbl[i].name, val);
9260 goto test_mem_exit;
9261 }
9262 }
9263
9264 rc = 0;
9265
9266test_mem_exit:
9267 return rc;
9268}
9269
9270static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9271{
9272 int cnt = 1000;
9273
9274 if (link_up)
9275 while (bnx2x_link_test(bp) && cnt--)
9276 msleep(10);
9277}
9278
9279static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9280{
9281 unsigned int pkt_size, num_pkts, i;
9282 struct sk_buff *skb;
9283 unsigned char *packet;
9284 struct bnx2x_fastpath *fp = &bp->fp[0];
9285 u16 tx_start_idx, tx_idx;
9286 u16 rx_start_idx, rx_idx;
9287 u16 pkt_prod;
9288 struct sw_tx_bd *tx_buf;
9289 struct eth_tx_bd *tx_bd;
9290 dma_addr_t mapping;
9291 union eth_rx_cqe *cqe;
9292 u8 cqe_fp_flags;
9293 struct sw_rx_bd *rx_buf;
9294 u16 len;
9295 int rc = -ENODEV;
9296
9297 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9298 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9299 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9300
9301 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
9302 u16 cnt = 1000;
9303 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
9304 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9305 /* wait until link state is restored */
9306 if (link_up)
9307 while (cnt-- && bnx2x_test_link(&bp->link_params,
9308 &bp->link_vars))
9309 msleep(10);
9310 } else
9311 return -EINVAL;
9312
9313 pkt_size = 1514;
9314 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9315 if (!skb) {
9316 rc = -ENOMEM;
9317 goto test_loopback_exit;
9318 }
9319 packet = skb_put(skb, pkt_size);
9320 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9321 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9322 for (i = ETH_HLEN; i < pkt_size; i++)
9323 packet[i] = (unsigned char) (i & 0xff);
9324
9325 num_pkts = 0;
9326 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9327 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9328
9329 pkt_prod = fp->tx_pkt_prod++;
9330 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9331 tx_buf->first_bd = fp->tx_bd_prod;
9332 tx_buf->skb = skb;
9333
9334 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9335 mapping = pci_map_single(bp->pdev, skb->data,
9336 skb_headlen(skb), PCI_DMA_TODEVICE);
9337 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9338 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9339 tx_bd->nbd = cpu_to_le16(1);
9340 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9341 tx_bd->vlan = cpu_to_le16(pkt_prod);
9342 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9343 ETH_TX_BD_FLAGS_END_BD);
9344 tx_bd->general_data = ((UNICAST_ADDRESS <<
9345 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9346
9347 wmb();
9348
9349 fp->hw_tx_prods->bds_prod =
9350 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9351 mb(); /* FW restriction: must not reorder writing nbd and packets */
9352 fp->hw_tx_prods->packets_prod =
9353 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9354 DOORBELL(bp, FP_IDX(fp), 0);
9355
9356 mmiowb();
9357
9358 num_pkts++;
9359 fp->tx_bd_prod++;
9360 bp->dev->trans_start = jiffies;
9361
9362 udelay(100);
9363
9364 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9365 if (tx_idx != tx_start_idx + num_pkts)
9366 goto test_loopback_exit;
9367
9368 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9369 if (rx_idx != rx_start_idx + num_pkts)
9370 goto test_loopback_exit;
9371
9372 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9373 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9374 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9375 goto test_loopback_rx_exit;
9376
9377 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9378 if (len != pkt_size)
9379 goto test_loopback_rx_exit;
9380
9381 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9382 skb = rx_buf->skb;
9383 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9384 for (i = ETH_HLEN; i < pkt_size; i++)
9385 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9386 goto test_loopback_rx_exit;
9387
9388 rc = 0;
9389
9390test_loopback_rx_exit:
9391
9392 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9393 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9394 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9395 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9396
9397 /* Update producers */
9398 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9399 fp->rx_sge_prod);
9400
9401test_loopback_exit:
9402 bp->link_params.loopback_mode = LOOPBACK_NONE;
9403
9404 return rc;
9405}
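/* Editor's note (flow summary): the loopback test builds one
 * self-addressed 1514-byte frame with a known byte pattern, posts it on
 * fastpath 0 and rings the doorbell, then checks that the Tx and Rx
 * consumer indices advanced and that the received CQE and payload match
 * before restoring the ring producers.
 */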
9406
9407static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9408{
9409 int rc = 0;
9410
9411 if (!netif_running(bp->dev))
9412 return BNX2X_LOOPBACK_FAILED;
9413
9414 bnx2x_netif_stop(bp, 1);
9415 bnx2x_acquire_phy_lock(bp);
9416
9417 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9418 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9419 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9420 }
9421
9422 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9423 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9424 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9425 }
9426
9427 bnx2x_release_phy_lock(bp);
9428 bnx2x_netif_start(bp);
9429
9430 return rc;
9431}
9432
9433#define CRC32_RESIDUAL 0xdebb20e3
9434
9435static int bnx2x_test_nvram(struct bnx2x *bp)
9436{
9437 static const struct {
9438 int offset;
9439 int size;
9440 } nvram_tbl[] = {
9441 { 0, 0x14 }, /* bootstrap */
9442 { 0x14, 0xec }, /* dir */
9443 { 0x100, 0x350 }, /* manuf_info */
9444 { 0x450, 0xf0 }, /* feature_info */
9445 { 0x640, 0x64 }, /* upgrade_key_info */
9446 { 0x6a4, 0x64 },
9447 { 0x708, 0x70 }, /* manuf_key_info */
9448 { 0x778, 0x70 },
9449 { 0, 0 }
9450 };
9451 u32 buf[0x350 / 4];
9452 u8 *data = (u8 *)buf;
9453 int i, rc;
9454 u32 magic, csum;
9455
9456 rc = bnx2x_nvram_read(bp, 0, data, 4);
9457 if (rc) {
9458 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9459 goto test_nvram_exit;
9460 }
9461
9462 magic = be32_to_cpu(buf[0]);
9463 if (magic != 0x669955aa) {
9464 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9465 rc = -ENODEV;
9466 goto test_nvram_exit;
9467 }
9468
9469 for (i = 0; nvram_tbl[i].size; i++) {
9470
9471 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9472 nvram_tbl[i].size);
9473 if (rc) {
9474 DP(NETIF_MSG_PROBE,
9475 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9476 goto test_nvram_exit;
9477 }
9478
9479 csum = ether_crc_le(nvram_tbl[i].size, data);
9480 if (csum != CRC32_RESIDUAL) {
9481 DP(NETIF_MSG_PROBE,
9482 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9483 rc = -ENODEV;
9484 goto test_nvram_exit;
9485 }
9486 }
9487
9488test_nvram_exit:
9489 return rc;
9490}
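/* Editor's note (assumption): each nvram_tbl region is stored with a
 * trailing CRC32, so running ether_crc_le() over the whole region
 * (data plus stored CRC) must yield the fixed residual 0xdebb20e3 for
 * an intact image.
 */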
9491
9492static int bnx2x_test_intr(struct bnx2x *bp)
9493{
9494 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9495 int i, rc;
9496
9497 if (!netif_running(bp->dev))
9498 return -ENODEV;
9499
9500 config->hdr.length = 0;
9501 if (CHIP_IS_E1(bp))
9502 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9503 else
9504 config->hdr.offset = BP_FUNC(bp);
9505 config->hdr.client_id = BP_CL_ID(bp);
9506 config->hdr.reserved1 = 0;
9507
9508 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9509 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9510 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9511 if (rc == 0) {
9512 bp->set_mac_pending++;
9513 for (i = 0; i < 10; i++) {
9514 if (!bp->set_mac_pending)
9515 break;
9516 msleep_interruptible(10);
9517 }
9518 if (i == 10)
9519 rc = -ENODEV;
9520 }
9521
9522 return rc;
9523}
9524
9525static void bnx2x_self_test(struct net_device *dev,
9526 struct ethtool_test *etest, u64 *buf)
9527{
9528 struct bnx2x *bp = netdev_priv(dev);
9529
9530 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9531
9532 if (!netif_running(dev))
9533 return;
9534
9535 /* offline tests are not supported in MF mode */
9536 if (IS_E1HMF(bp))
9537 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9538
9539 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9540 u8 link_up;
9541
9542 link_up = bp->link_vars.link_up;
9543 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9544 bnx2x_nic_load(bp, LOAD_DIAG);
9545 /* wait until link state is restored */
9546 bnx2x_wait_for_link(bp, link_up);
9547
9548 if (bnx2x_test_registers(bp) != 0) {
9549 buf[0] = 1;
9550 etest->flags |= ETH_TEST_FL_FAILED;
9551 }
9552 if (bnx2x_test_memory(bp) != 0) {
9553 buf[1] = 1;
9554 etest->flags |= ETH_TEST_FL_FAILED;
9555 }
9556 buf[2] = bnx2x_test_loopback(bp, link_up);
9557 if (buf[2] != 0)
9558 etest->flags |= ETH_TEST_FL_FAILED;
9559
9560 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9561 bnx2x_nic_load(bp, LOAD_NORMAL);
9562 /* wait until link state is restored */
9563 bnx2x_wait_for_link(bp, link_up);
9564 }
9565 if (bnx2x_test_nvram(bp) != 0) {
9566 buf[3] = 1;
9567 etest->flags |= ETH_TEST_FL_FAILED;
9568 }
9569 if (bnx2x_test_intr(bp) != 0) {
9570 buf[4] = 1;
9571 etest->flags |= ETH_TEST_FL_FAILED;
9572 }
9573 if (bp->port.pmf)
9574 if (bnx2x_link_test(bp) != 0) {
9575 buf[5] = 1;
9576 etest->flags |= ETH_TEST_FL_FAILED;
9577 }
9578
9579#ifdef BNX2X_EXTRA_DEBUG
9580 bnx2x_panic_dump(bp);
9581#endif
9582}
9583
9584static const struct {
9585 long offset;
9586 int size;
9587 u8 string[ETH_GSTRING_LEN];
9588} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9589/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9590 { Q_STATS_OFFSET32(error_bytes_received_hi),
9591 8, "[%d]: rx_error_bytes" },
9592 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9593 8, "[%d]: rx_ucast_packets" },
9594 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9595 8, "[%d]: rx_mcast_packets" },
9596 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9597 8, "[%d]: rx_bcast_packets" },
9598 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9599 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9600 4, "[%d]: rx_phy_ip_err_discards"},
9601 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9602 4, "[%d]: rx_skb_alloc_discard" },
9603 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9604
9605/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9606 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9607 8, "[%d]: tx_packets" }
9608};
9609
9610static const struct {
9611 long offset;
9612 int size;
9613 u32 flags;
9614#define STATS_FLAGS_PORT 1
9615#define STATS_FLAGS_FUNC 2
9616#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9617 u8 string[ETH_GSTRING_LEN];
9618} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9619/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9620 8, STATS_FLAGS_BOTH, "rx_bytes" },
9621 { STATS_OFFSET32(error_bytes_received_hi),
9622 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9623 { STATS_OFFSET32(total_unicast_packets_received_hi),
9624 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9625 { STATS_OFFSET32(total_multicast_packets_received_hi),
9626 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9627 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9628 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9629 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9630 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9631 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9632 8, STATS_FLAGS_PORT, "rx_align_errors" },
9633 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9634 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9635 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9636 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9637/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9638 8, STATS_FLAGS_PORT, "rx_fragments" },
9639 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9640 8, STATS_FLAGS_PORT, "rx_jabbers" },
9641 { STATS_OFFSET32(no_buff_discard_hi),
9642 8, STATS_FLAGS_BOTH, "rx_discards" },
9643 { STATS_OFFSET32(mac_filter_discard),
9644 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9645 { STATS_OFFSET32(xxoverflow_discard),
9646 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9647 { STATS_OFFSET32(brb_drop_hi),
9648 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9649 { STATS_OFFSET32(brb_truncate_hi),
9650 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9651 { STATS_OFFSET32(pause_frames_received_hi),
9652 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9653 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9654 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9655 { STATS_OFFSET32(nig_timer_max),
9656 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9657/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9658 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9659 { STATS_OFFSET32(rx_skb_alloc_failed),
9660 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9661 { STATS_OFFSET32(hw_csum_err),
9662 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9663
9664 { STATS_OFFSET32(total_bytes_transmitted_hi),
9665 8, STATS_FLAGS_BOTH, "tx_bytes" },
9666 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9667 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9668 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9669 8, STATS_FLAGS_BOTH, "tx_packets" },
9670 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9671 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9672 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9673 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9674 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9675 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9676 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9677 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9678/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9679 8, STATS_FLAGS_PORT, "tx_deferred" },
9680 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9681 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9682 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9683 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9684 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9685 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9686 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9687 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9688 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9689 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9690 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9691 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9692 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9693 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9694 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9695 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9696 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9697 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9698/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9699 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9700 { STATS_OFFSET32(pause_frames_sent_hi),
9701 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9702};
9703
de832a55
EG
9704#define IS_PORT_STAT(i) \
9705 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9706#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9707#define IS_E1HMF_MODE_STAT(bp) \
9708 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9709
a2fbb9ea
ET
9710static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9711{
bb2a0f7a 9712 struct bnx2x *bp = netdev_priv(dev);
de832a55 9713 int i, j, k;
bb2a0f7a 9714
a2fbb9ea
ET
9715 switch (stringset) {
9716 case ETH_SS_STATS:
9717 if (is_multi(bp)) {
9718 k = 0;
9719 for_each_queue(bp, i) {
9720 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9721 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9722 bnx2x_q_stats_arr[j].string, i);
9723 k += BNX2X_NUM_Q_STATS;
9724 }
9725 if (IS_E1HMF_MODE_STAT(bp))
9726 break;
9727 for (j = 0; j < BNX2X_NUM_STATS; j++)
9728 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9729 bnx2x_stats_arr[j].string);
9730 } else {
9731 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9732 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9733 continue;
9734 strcpy(buf + j*ETH_GSTRING_LEN,
9735 bnx2x_stats_arr[i].string);
9736 j++;
9737 }
9738 }
9739 break;
9740
9741 case ETH_SS_TEST:
9742 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9743 break;
9744 }
9745}
9746
9747static int bnx2x_get_stats_count(struct net_device *dev)
9748{
9749 struct bnx2x *bp = netdev_priv(dev);
9750 int i, num_stats;
9751
9752 if (is_multi(bp)) {
9753 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9754 if (!IS_E1HMF_MODE_STAT(bp))
9755 num_stats += BNX2X_NUM_STATS;
9756 } else {
9757 if (IS_E1HMF_MODE_STAT(bp)) {
9758 num_stats = 0;
9759 for (i = 0; i < BNX2X_NUM_STATS; i++)
9760 if (IS_FUNC_STAT(i))
9761 num_stats++;
9762 } else
9763 num_stats = BNX2X_NUM_STATS;
9764 }
9765
9766 return num_stats;
9767}
9768
9769static void bnx2x_get_ethtool_stats(struct net_device *dev,
9770 struct ethtool_stats *stats, u64 *buf)
9771{
9772 struct bnx2x *bp = netdev_priv(dev);
9773 u32 *hw_stats, *offset;
9774 int i, j, k;
9775
9776 if (is_multi(bp)) {
9777 k = 0;
9778 for_each_queue(bp, i) {
9779 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9780 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9781 if (bnx2x_q_stats_arr[j].size == 0) {
9782 /* skip this counter */
9783 buf[k + j] = 0;
9784 continue;
9785 }
9786 offset = (hw_stats +
9787 bnx2x_q_stats_arr[j].offset);
9788 if (bnx2x_q_stats_arr[j].size == 4) {
9789 /* 4-byte counter */
9790 buf[k + j] = (u64) *offset;
9791 continue;
9792 }
9793 /* 8-byte counter */
9794 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9795 }
9796 k += BNX2X_NUM_Q_STATS;
9797 }
9798 if (IS_E1HMF_MODE_STAT(bp))
9799 return;
9800 hw_stats = (u32 *)&bp->eth_stats;
9801 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9802 if (bnx2x_stats_arr[j].size == 0) {
9803 /* skip this counter */
9804 buf[k + j] = 0;
9805 continue;
9806 }
9807 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9808 if (bnx2x_stats_arr[j].size == 4) {
9809 /* 4-byte counter */
9810 buf[k + j] = (u64) *offset;
9811 continue;
9812 }
9813 /* 8-byte counter */
9814 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9815 }
9816 } else {
9817 hw_stats = (u32 *)&bp->eth_stats;
9818 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9819 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9820 continue;
9821 if (bnx2x_stats_arr[i].size == 0) {
9822 /* skip this counter */
9823 buf[j] = 0;
9824 j++;
9825 continue;
9826 }
9827 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9828 if (bnx2x_stats_arr[i].size == 4) {
9829 /* 4-byte counter */
9830 buf[j] = (u64) *offset;
9831 j++;
9832 continue;
9833 }
9834 /* 8-byte counter */
9835 buf[j] = HILO_U64(*offset, *(offset + 1));
9836 j++;
9837 }
9838 }
9839}
9840
9841static int bnx2x_phys_id(struct net_device *dev, u32 data)
9842{
9843 struct bnx2x *bp = netdev_priv(dev);
9844 int port = BP_PORT(bp);
9845 int i;
9846
9847 if (!netif_running(dev))
9848 return 0;
9849
9850 if (!bp->port.pmf)
9851 return 0;
9852
9853 if (data == 0)
9854 data = 2;
9855
9856 for (i = 0; i < (data * 2); i++) {
9857 if ((i % 2) == 0)
9858 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9859 bp->link_params.hw_led_mode,
9860 bp->link_params.chip_id);
9861 else
9862 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9863 bp->link_params.hw_led_mode,
9864 bp->link_params.chip_id);
9865
9866 msleep_interruptible(500);
9867 if (signal_pending(current))
9868 break;
9869 }
9870
9871 if (bp->link_vars.link_up)
9872 bnx2x_set_led(bp, port, LED_MODE_OPER,
9873 bp->link_vars.line_speed,
9874 bp->link_params.hw_led_mode,
9875 bp->link_params.chip_id);
9876
9877 return 0;
9878}
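/* Editor's note (usage sketch): this services "ethtool -p <if> N" - the
 * LED toggles between OPER and OFF every 500ms for N seconds (data == 0
 * is treated as 2) and is then restored to match the current link
 * state.
 */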
9879
9880static struct ethtool_ops bnx2x_ethtool_ops = {
9881 .get_settings = bnx2x_get_settings,
9882 .set_settings = bnx2x_set_settings,
9883 .get_drvinfo = bnx2x_get_drvinfo,
9884 .get_wol = bnx2x_get_wol,
9885 .set_wol = bnx2x_set_wol,
9886 .get_msglevel = bnx2x_get_msglevel,
9887 .set_msglevel = bnx2x_set_msglevel,
9888 .nway_reset = bnx2x_nway_reset,
9889 .get_link = ethtool_op_get_link,
9890 .get_eeprom_len = bnx2x_get_eeprom_len,
9891 .get_eeprom = bnx2x_get_eeprom,
9892 .set_eeprom = bnx2x_set_eeprom,
9893 .get_coalesce = bnx2x_get_coalesce,
9894 .set_coalesce = bnx2x_set_coalesce,
9895 .get_ringparam = bnx2x_get_ringparam,
9896 .set_ringparam = bnx2x_set_ringparam,
9897 .get_pauseparam = bnx2x_get_pauseparam,
9898 .set_pauseparam = bnx2x_set_pauseparam,
9899 .get_rx_csum = bnx2x_get_rx_csum,
9900 .set_rx_csum = bnx2x_set_rx_csum,
9901 .get_tx_csum = ethtool_op_get_tx_csum,
9902 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9903 .set_flags = bnx2x_set_flags,
9904 .get_flags = ethtool_op_get_flags,
9905 .get_sg = ethtool_op_get_sg,
9906 .set_sg = ethtool_op_set_sg,
9907 .get_tso = ethtool_op_get_tso,
9908 .set_tso = bnx2x_set_tso,
9909 .self_test_count = bnx2x_self_test_count,
9910 .self_test = bnx2x_self_test,
9911 .get_strings = bnx2x_get_strings,
9912 .phys_id = bnx2x_phys_id,
9913 .get_stats_count = bnx2x_get_stats_count,
9914 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9915};
9916
9917/* end of ethtool_ops */
9918
9919/****************************************************************************
9920* General service functions
9921****************************************************************************/
9922
9923static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9924{
9925 u16 pmcsr;
9926
9927 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9928
9929 switch (state) {
9930 case PCI_D0:
9931 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9932 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9933 PCI_PM_CTRL_PME_STATUS));
9934
9935 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9936 /* delay required during transition out of D3hot */
9937 msleep(20);
9938 break;
9939
9940 case PCI_D3hot:
9941 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9942 pmcsr |= 3;
9943
9944 if (bp->wol)
9945 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9946
9947 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9948 pmcsr);
9949
9950 /* No more memory access after this point until
9951 * device is brought back to D0.
9952 */
9953 break;
9954
9955 default:
9956 return -EINVAL;
9957 }
9958 return 0;
9959}
9960
9961static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9962{
9963 u16 rx_cons_sb;
9964
9965 /* Tell compiler that status block fields can change */
9966 barrier();
9967 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9968 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9969 rx_cons_sb++;
9970 return (fp->rx_comp_cons != rx_cons_sb);
9971}
9972
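/* Editor's note (assumption): the "== MAX_RCQ_DESC_CNT" skip above
 * appears to account for the last descriptor of each RCQ page not being
 * a real completion entry, so the hardware index is stepped past it
 * before the comparison with rx_comp_cons.
 */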
9973/*
9974 * net_device service functions
9975 */
9976
9977static int bnx2x_poll(struct napi_struct *napi, int budget)
9978{
9979 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9980 napi);
9981 struct bnx2x *bp = fp->bp;
9982 int work_done = 0;
9983
9984#ifdef BNX2X_STOP_ON_ERROR
9985 if (unlikely(bp->panic))
9986 goto poll_panic;
9987#endif
9988
9989 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9990 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9991 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9992
9993 bnx2x_update_fpsb_idx(fp);
9994
9995 if (bnx2x_has_tx_work(fp))
9996 bnx2x_tx_int(fp, budget);
9997
9998 if (bnx2x_has_rx_work(fp))
9999 work_done = bnx2x_rx_int(fp, budget);
10000 rmb(); /* BNX2X_HAS_WORK() reads the status block */
10001
10002 /* must not complete if we consumed full budget */
10003 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
10004
10005#ifdef BNX2X_STOP_ON_ERROR
10006poll_panic:
10007#endif
10008 napi_complete(napi);
10009
10010 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
10011 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10012 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
10013 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10014 }
10015 return work_done;
10016}
10017
10018
10019/* we split the first BD into headers and data BDs
10020 * to ease the pain of our fellow microcode engineers
10021 * we use one mapping for both BDs
10022 * So far this has only been observed to happen
10023 * in Other Operating Systems(TM)
10024 */
10025static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10026 struct bnx2x_fastpath *fp,
10027 struct eth_tx_bd **tx_bd, u16 hlen,
10028 u16 bd_prod, int nbd)
10029{
10030 struct eth_tx_bd *h_tx_bd = *tx_bd;
10031 struct eth_tx_bd *d_tx_bd;
10032 dma_addr_t mapping;
10033 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10034
10035 /* first fix first BD */
10036 h_tx_bd->nbd = cpu_to_le16(nbd);
10037 h_tx_bd->nbytes = cpu_to_le16(hlen);
10038
10039 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10040 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10041 h_tx_bd->addr_lo, h_tx_bd->nbd);
10042
10043 /* now get a new data BD
10044 * (after the pbd) and fill it */
10045 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10046 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10047
10048 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10049 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10050
10051 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10052 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10053 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10054 d_tx_bd->vlan = 0;
10055 /* this marks the BD as one that has no individual mapping
10056 * the FW ignores this flag in a BD not marked start
10057 */
10058 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10059 DP(NETIF_MSG_TX_QUEUED,
10060 "TSO split data size is %d (%x:%x)\n",
10061 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10062
10063 /* update tx_bd for marking the last BD flag */
10064 *tx_bd = d_tx_bd;
10065
10066 return bd_prod;
10067}
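/* Editor's note (worked example): with a 54-byte header split out of a
 * 1400-byte first BD, h_tx_bd keeps nbytes = 54 and d_tx_bd gets
 * nbytes = 1346 with addr = h_tx_bd addr + 54 - both BDs share the one
 * DMA mapping, as the comment above promises.
 */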
10068
10069static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10070{
10071 if (fix > 0)
10072 csum = (u16) ~csum_fold(csum_sub(csum,
10073 csum_partial(t_header - fix, fix, 0)));
10074
10075 else if (fix < 0)
10076 csum = (u16) ~csum_fold(csum_add(csum,
10077 csum_partial(t_header, -fix, 0)));
10078
10079 return swab16(csum);
10080}
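/* Editor's note (sketch): bnx2x_csum_fix() adjusts a checksum computed
 * from a different starting point than the transport header - a
 * positive fix subtracts the partial sum of the extra leading bytes, a
 * negative fix adds the missing ones - and byte-swaps the result for
 * the parsing BD.
 */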
10081
10082static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10083{
10084 u32 rc;
10085
10086 if (skb->ip_summed != CHECKSUM_PARTIAL)
10087 rc = XMIT_PLAIN;
10088
10089 else {
10090 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10091 rc = XMIT_CSUM_V6;
10092 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10093 rc |= XMIT_CSUM_TCP;
10094
10095 } else {
10096 rc = XMIT_CSUM_V4;
10097 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10098 rc |= XMIT_CSUM_TCP;
10099 }
10100 }
10101
10102 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10103 rc |= XMIT_GSO_V4;
10104
10105 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10106 rc |= XMIT_GSO_V6;
10107
10108 return rc;
10109}
10110
10111#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10112/* check if packet requires linearization (packet is too fragmented) */
10113static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10114 u32 xmit_type)
10115{
10116 int to_copy = 0;
10117 int hlen = 0;
10118 int first_bd_sz = 0;
10119
10120 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10121 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10122
10123 if (xmit_type & XMIT_GSO) {
10124 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10125 /* Check if LSO packet needs to be copied:
10126 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10127 int wnd_size = MAX_FETCH_BD - 3;
10128 /* Number of windows to check */
10129 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10130 int wnd_idx = 0;
10131 int frag_idx = 0;
10132 u32 wnd_sum = 0;
10133
10134 /* Headers length */
10135 hlen = (int)(skb_transport_header(skb) - skb->data) +
10136 tcp_hdrlen(skb);
10137
10138 /* Amount of data (w/o headers) on linear part of SKB*/
10139 first_bd_sz = skb_headlen(skb) - hlen;
10140
10141 wnd_sum = first_bd_sz;
10142
10143 /* Calculate the first sum - it's special */
10144 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10145 wnd_sum +=
10146 skb_shinfo(skb)->frags[frag_idx].size;
10147
10148 /* If there was data on linear skb data - check it */
10149 if (first_bd_sz > 0) {
10150 if (unlikely(wnd_sum < lso_mss)) {
10151 to_copy = 1;
10152 goto exit_lbl;
10153 }
10154
10155 wnd_sum -= first_bd_sz;
10156 }
10157
10158 /* Others are easier: run through the frag list and
10159 check all windows */
10160 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10161 wnd_sum +=
10162 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10163
10164 if (unlikely(wnd_sum < lso_mss)) {
10165 to_copy = 1;
10166 break;
10167 }
10168 wnd_sum -=
10169 skb_shinfo(skb)->frags[wnd_idx].size;
10170 }
10171
10172 } else {
10173 /* in non-LSO too fragmented packet should always
10174 be linearized */
10175 to_copy = 1;
10176 }
10177 }
10178
10179exit_lbl:
10180 if (unlikely(to_copy))
10181 DP(NETIF_MSG_TX_QUEUED,
10182 "Linearization IS REQUIRED for %s packet. "
10183 "num_frags %d hlen %d first_bd_sz %d\n",
10184 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10185 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10186
10187 return to_copy;
10188}
10189#endif
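/* Editor's note (illustrative, assuming MAX_FETCH_BD is defined
 * elsewhere as the FW's BD-fetch window): the check above slides a
 * window of MAX_FETCH_BD - 3 fragment sizes (seeded with the
 * post-header linear bytes) and requests linearization as soon as any
 * window sums to less than gso_size, since the FW must find a full MSS
 * within one fetch window.
 */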
10190
10191/* called with netif_tx_lock
10192 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10193 * netif_wake_queue()
10194 */
10195static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10196{
10197 struct bnx2x *bp = netdev_priv(dev);
10198 struct bnx2x_fastpath *fp;
10199 struct netdev_queue *txq;
10200 struct sw_tx_bd *tx_buf;
10201 struct eth_tx_bd *tx_bd;
10202 struct eth_tx_parse_bd *pbd = NULL;
10203 u16 pkt_prod, bd_prod;
10204 int nbd, fp_index;
10205 dma_addr_t mapping;
10206 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10207 int vlan_off = (bp->e1hov ? 4 : 0);
10208 int i;
10209 u8 hlen = 0;
10210
10211#ifdef BNX2X_STOP_ON_ERROR
10212 if (unlikely(bp->panic))
10213 return NETDEV_TX_BUSY;
10214#endif
10215
555f6c78
EG
10216 fp_index = skb_get_queue_mapping(skb);
10217 txq = netdev_get_tx_queue(dev, fp_index);
10218
a2fbb9ea 10219 fp = &bp->fp[fp_index];
755735eb 10220
231fd58a 10221 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10222 fp->eth_q_stats.driver_xoff++,
555f6c78 10223 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10224 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10225 return NETDEV_TX_BUSY;
10226 }
10227
755735eb
EG
10228 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10229 " gso type %x xmit_type %x\n",
10230 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10231 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10232
632da4d6 10233#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 10234 /* First, check if we need to linearize the skb
755735eb
EG
10235 (due to FW restrictions) */
10236 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10237 /* Statistics of linearization */
10238 bp->lin_cnt++;
10239 if (skb_linearize(skb) != 0) {
10240 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10241 "silently dropping this SKB\n");
10242 dev_kfree_skb_any(skb);
da5a662a 10243 return NETDEV_TX_OK;
755735eb
EG
10244 }
10245 }
632da4d6 10246#endif
755735eb 10247
a2fbb9ea 10248 /*
755735eb 10249 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10250 then for TSO or xsum we have a parsing info BD,
755735eb 10251 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10252 (don't forget to mark the last one as last,
10253 and to unmap only AFTER you write to the BD ...)
755735eb 10254 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10255 */
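
	/*
	 * Editorial sketch of the resulting BD chain (derived from the
	 * comment above, not part of the original source):
	 *
	 *   start BD (linear data) -> PBD (csum/TSO only) -> frag BDs,
	 *   with the final BD flagged ETH_TX_BD_FLAGS_END_BD
	 *
	 * nbd, computed below, is initially nr_frags + 1 for the start BD,
	 * plus one more when a parsing BD is inserted.
	 */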

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets always have BDs.
	 */
	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
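
/*
 * Illustrative aside (added in editing, not from the original source):
 * the tail of bnx2x_start_xmit() above is the usual lock-free XOFF
 * pattern -
 *
 *	smp_mb();
 *	netif_tx_stop_queue(txq);
 *	if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
 *		netif_tx_wake_queue(txq);
 *
 * the re-check after the barrier catches a completion in bnx2x_tx_int()
 * that freed BDs between the availability test and the stop, so the
 * queue cannot be left stopped while room is available.
 */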

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.client_id = 0;
				config->config_table[i].target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].cam_entry.msb_mac_addr,
				   config->config_table[i].cam_entry.middle_mac_addr,
				   config->config_table[i].cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
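
/*
 * Worked example (added in editing, not from the original source) for the
 * E1H hash above. For a multicast MAC whose crc32c_le() is, say,
 * 0x9A123456:
 *
 *	bit    = (0x9A123456 >> 24) & 0xff  =  0x9A  =  154
 *	regidx = 154 >> 5                   =  4
 *	bit   &= 0x1f                       =  26
 *
 * so bit 26 of mc_filter[4] is set. Since bit is taken from 8 bits of the
 * CRC, this is a 256-bin imperfect filter: unrelated addresses can share
 * a bin, so extra frames may still reach the stack and be dropped there.
 */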

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
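
/*
 * Illustrative note (added in editing; constants assumed, not from the
 * original source): with the conventional ETH_HLEN of 14 and an assumed
 * ETH_MIN_PACKET_SIZE of 60, the smallest new_mtu bnx2x_change_mtu()
 * accepts is 46; anything above ETH_MAX_JUMBO_PACKET_SIZE is rejected
 * with -EINVAL the same way.
 */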

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

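/*
 * Illustrative note (added in editing, not from the original source):
 * bnx2x_init_one() below feeds these two decoders straight into its probe
 * banner, so a link whose width field decodes to 8 and whose speed field
 * decodes to 2 is reported as "PCI-E x8 5GHz (Gen2)".
 */
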
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
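
/*
 * Recovery-flow sketch (added in editing; the ordering follows the
 * kernel-doc comments above): the PCI error-recovery core invokes the
 * three callbacks in sequence -
 *
 *	bnx2x_io_error_detected()  detach the netdev, unload NIC state,
 *	                           return PCI_ERS_RESULT_NEED_RESET
 *	bnx2x_io_slot_reset()      re-enable and restore the device after
 *	                           the slot reset
 *	bnx2x_io_resume()          re-read MCP state, reload the NIC,
 *	                           re-attach the netdev
 */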

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);