bnx2x: Disabling interrupts after iSCSI-boot
[deliverable/linux.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea
ET
51#include <linux/io.h>
52
53#include "bnx2x_reg.h"
54#include "bnx2x_fw_defs.h"
55#include "bnx2x_hsi.h"
c18487ee 56#include "bnx2x_link.h"
a2fbb9ea
ET
57#include "bnx2x.h"
58#include "bnx2x_init.h"
59
e8b5fc51
VZ
60#define DRV_MODULE_VERSION "1.45.26"
61#define DRV_MODULE_RELDATE "2009/01/26"
34f80b04 62#define BNX2X_BC_VER 0x040200
a2fbb9ea 63
34f80b04
EG
64/* Time in jiffies before concluding the transmitter is hung */
65#define TX_TIMEOUT (5*HZ)
a2fbb9ea 66
53a10565 67static char version[] __devinitdata =
34f80b04 68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
24e3fcef 71MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
73MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 75
555f6c78
EG
76static int multi_mode = 1;
77module_param(multi_mode, int, 0);
78
19680c48 79static int disable_tpa;
a2fbb9ea 80static int poll;
a2fbb9ea 81static int debug;
34f80b04 82static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 83
19680c48 84module_param(disable_tpa, int, 0);
8badd27a
EG
85
86static int int_mode;
87module_param(int_mode, int, 0);
88MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
89
a2fbb9ea 90module_param(poll, int, 0);
a2fbb9ea 91module_param(debug, int, 0);
19680c48 92MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
a2fbb9ea 93MODULE_PARM_DESC(poll, "use polling (for debug)");
c14423fe 94MODULE_PARM_DESC(debug, "default debug msglevel");
a2fbb9ea 95
1cf167f2 96static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
97
98enum bnx2x_board_type {
99 BCM57710 = 0,
34f80b04
EG
100 BCM57711 = 1,
101 BCM57711E = 2,
a2fbb9ea
ET
102};
103
34f80b04 104/* indexed by board_type, above */
53a10565 105static struct {
a2fbb9ea
ET
106 char *name;
107} board_info[] __devinitdata = {
34f80b04
EG
108 { "Broadcom NetXtreme II BCM57710 XGb" },
109 { "Broadcom NetXtreme II BCM57711 XGb" },
110 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
111};
112
34f80b04 113
a2fbb9ea
ET
114static const struct pci_device_id bnx2x_pci_tbl[] = {
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
116 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
121 { 0 }
122};
123
124MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
125
126/****************************************************************************
127* General service functions
128****************************************************************************/
129
130/* used only at init
131 * locking is done by mcp
132 */
133static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
134{
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
136 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
137 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
138 PCICFG_VENDOR_ID_OFFSET);
139}
140
a2fbb9ea
ET
141static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
142{
143 u32 val;
144
145 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
146 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
147 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
148 PCICFG_VENDOR_ID_OFFSET);
149
150 return val;
151}
a2fbb9ea
ET
152
153static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
158};
159
160/* copy command into DMAE command memory and set DMAE command go */
161static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
162 int idx)
163{
164 u32 cmd_offset;
165 int i;
166
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
170
ad8d3948
EG
171 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
173 }
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
175}
176
ad8d3948
EG
177void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
178 u32 len32)
a2fbb9ea 179{
ad8d3948 180 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 181 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
182 int cnt = 200;
183
184 if (!bp->dmae_ready) {
185 u32 *data = bnx2x_sp(bp, wb_data[0]);
186
187 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
188 " using indirect\n", dst_addr, len32);
189 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
190 return;
191 }
192
193 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
194
195 memset(dmae, 0, sizeof(struct dmae_command));
196
197 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
198 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
199 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
200#ifdef __BIG_ENDIAN
201 DMAE_CMD_ENDIANITY_B_DW_SWAP |
202#else
203 DMAE_CMD_ENDIANITY_DW_SWAP |
204#endif
34f80b04
EG
205 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
206 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
207 dmae->src_addr_lo = U64_LO(dma_addr);
208 dmae->src_addr_hi = U64_HI(dma_addr);
209 dmae->dst_addr_lo = dst_addr >> 2;
210 dmae->dst_addr_hi = 0;
211 dmae->len = len32;
212 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
213 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 214 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 215
ad8d3948 216 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
217 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
218 "dst_addr [%x:%08x (%08x)]\n"
219 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
220 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
221 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
222 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 223 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
224 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
225 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
226
227 *wb_comp = 0;
228
34f80b04 229 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
230
231 udelay(5);
ad8d3948
EG
232
233 while (*wb_comp != DMAE_COMP_VAL) {
234 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
235
ad8d3948 236 if (!cnt) {
a2fbb9ea
ET
237 BNX2X_ERR("dmae timeout!\n");
238 break;
239 }
ad8d3948 240 cnt--;
12469401
YG
241 /* adjust delay for emulation/FPGA */
242 if (CHIP_REV_IS_SLOW(bp))
243 msleep(100);
244 else
245 udelay(5);
a2fbb9ea 246 }
ad8d3948
EG
247
248 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
249}
250
c18487ee 251void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 252{
ad8d3948 253 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 254 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
255 int cnt = 200;
256
257 if (!bp->dmae_ready) {
258 u32 *data = bnx2x_sp(bp, wb_data[0]);
259 int i;
260
261 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
262 " using indirect\n", src_addr, len32);
263 for (i = 0; i < len32; i++)
264 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
265 return;
266 }
267
268 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
269
270 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
271 memset(dmae, 0, sizeof(struct dmae_command));
272
273 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
274 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
275 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
276#ifdef __BIG_ENDIAN
277 DMAE_CMD_ENDIANITY_B_DW_SWAP |
278#else
279 DMAE_CMD_ENDIANITY_DW_SWAP |
280#endif
34f80b04
EG
281 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
282 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
283 dmae->src_addr_lo = src_addr >> 2;
284 dmae->src_addr_hi = 0;
285 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
286 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
287 dmae->len = len32;
288 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
289 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 290 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 291
ad8d3948 292 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
293 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
294 "dst_addr [%x:%08x (%08x)]\n"
295 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
296 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
297 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
298 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
299
300 *wb_comp = 0;
301
34f80b04 302 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
303
304 udelay(5);
ad8d3948
EG
305
306 while (*wb_comp != DMAE_COMP_VAL) {
307
ad8d3948 308 if (!cnt) {
a2fbb9ea
ET
309 BNX2X_ERR("dmae timeout!\n");
310 break;
311 }
ad8d3948 312 cnt--;
12469401
YG
313 /* adjust delay for emulation/FPGA */
314 if (CHIP_REV_IS_SLOW(bp))
315 msleep(100);
316 else
317 udelay(5);
a2fbb9ea 318 }
ad8d3948 319 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
320 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
321 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
322
323 mutex_unlock(&bp->dmae_mutex);
324}
325
326/* used only for slowpath so not inlined */
327static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
328{
329 u32 wb_write[2];
330
331 wb_write[0] = val_hi;
332 wb_write[1] = val_lo;
333 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 334}
a2fbb9ea 335
ad8d3948
EG
336#ifdef USE_WB_RD
337static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
338{
339 u32 wb_data[2];
340
341 REG_RD_DMAE(bp, reg, wb_data, 2);
342
343 return HILO_U64(wb_data[0], wb_data[1]);
344}
345#endif
346
a2fbb9ea
ET
347static int bnx2x_mc_assert(struct bnx2x *bp)
348{
a2fbb9ea 349 char last_idx;
34f80b04
EG
350 int i, rc = 0;
351 u32 row0, row1, row2, row3;
352
353 /* XSTORM */
354 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
355 XSTORM_ASSERT_LIST_INDEX_OFFSET);
356 if (last_idx)
357 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
358
359 /* print the asserts */
360 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
361
362 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
363 XSTORM_ASSERT_LIST_OFFSET(i));
364 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
365 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
366 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
367 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
368 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
369 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
370
371 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
372 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
373 " 0x%08x 0x%08x 0x%08x\n",
374 i, row3, row2, row1, row0);
375 rc++;
376 } else {
377 break;
378 }
379 }
380
381 /* TSTORM */
382 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
383 TSTORM_ASSERT_LIST_INDEX_OFFSET);
384 if (last_idx)
385 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
386
387 /* print the asserts */
388 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
389
390 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
391 TSTORM_ASSERT_LIST_OFFSET(i));
392 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
393 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
394 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
395 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
396 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
397 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
398
399 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
400 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
401 " 0x%08x 0x%08x 0x%08x\n",
402 i, row3, row2, row1, row0);
403 rc++;
404 } else {
405 break;
406 }
407 }
408
409 /* CSTORM */
410 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
411 CSTORM_ASSERT_LIST_INDEX_OFFSET);
412 if (last_idx)
413 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
414
415 /* print the asserts */
416 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
417
418 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
419 CSTORM_ASSERT_LIST_OFFSET(i));
420 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
421 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
422 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
423 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
424 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
425 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
426
427 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
428 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
429 " 0x%08x 0x%08x 0x%08x\n",
430 i, row3, row2, row1, row0);
431 rc++;
432 } else {
433 break;
434 }
435 }
436
437 /* USTORM */
438 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
439 USTORM_ASSERT_LIST_INDEX_OFFSET);
440 if (last_idx)
441 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
442
443 /* print the asserts */
444 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
445
446 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
447 USTORM_ASSERT_LIST_OFFSET(i));
448 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
449 USTORM_ASSERT_LIST_OFFSET(i) + 4);
450 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
451 USTORM_ASSERT_LIST_OFFSET(i) + 8);
452 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
453 USTORM_ASSERT_LIST_OFFSET(i) + 12);
454
455 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
456 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
457 " 0x%08x 0x%08x 0x%08x\n",
458 i, row3, row2, row1, row0);
459 rc++;
460 } else {
461 break;
a2fbb9ea
ET
462 }
463 }
34f80b04 464
a2fbb9ea
ET
465 return rc;
466}
c14423fe 467
a2fbb9ea
ET
468static void bnx2x_fw_dump(struct bnx2x *bp)
469{
470 u32 mark, offset;
471 u32 data[9];
472 int word;
473
474 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772
ET
475 mark = ((mark + 0x3) & ~0x3);
476 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
a2fbb9ea
ET
477
478 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
479 for (word = 0; word < 8; word++)
480 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
481 offset + 4*word));
482 data[8] = 0x0;
49d66772 483 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
484 }
485 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
486 for (word = 0; word < 8; word++)
487 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
488 offset + 4*word));
489 data[8] = 0x0;
49d66772 490 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
491 }
492 printk("\n" KERN_ERR PFX "end of fw dump\n");
493}
494
495static void bnx2x_panic_dump(struct bnx2x *bp)
496{
497 int i;
498 u16 j, start, end;
499
66e855f3
YG
500 bp->stats_state = STATS_STATE_DISABLED;
501 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
502
a2fbb9ea
ET
503 BNX2X_ERR("begin crash dump -----------------\n");
504
505 for_each_queue(bp, i) {
506 struct bnx2x_fastpath *fp = &bp->fp[i];
507 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
508
509 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
34f80b04 510 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
a2fbb9ea 511 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
34f80b04 512 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
66e855f3
YG
513 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
514 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
515 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
516 fp->rx_bd_prod, fp->rx_bd_cons,
517 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
518 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
519 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
520 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
521 " *sb_u_idx(%x) bd data(%x,%x)\n",
522 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
523 fp->status_blk->c_status_block.status_block_index,
524 fp->fp_u_idx,
525 fp->status_blk->u_status_block.status_block_index,
526 hw_prods->packets_prod, hw_prods->bds_prod);
a2fbb9ea
ET
527
528 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
529 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
530 for (j = start; j < end; j++) {
531 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
532
533 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
534 sw_bd->skb, sw_bd->first_bd);
535 }
536
537 start = TX_BD(fp->tx_bd_cons - 10);
538 end = TX_BD(fp->tx_bd_cons + 254);
539 for (j = start; j < end; j++) {
540 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
541
542 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
543 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
544 }
545
546 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
547 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
548 for (j = start; j < end; j++) {
549 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
550 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
551
552 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
34f80b04 553 j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
554 }
555
3196a88a
EG
556 start = RX_SGE(fp->rx_sge_prod);
557 end = RX_SGE(fp->last_max_sge);
7a9b2557
VZ
558 for (j = start; j < end; j++) {
559 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
560 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
561
562 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
563 j, rx_sge[1], rx_sge[0], sw_page->page);
564 }
565
a2fbb9ea
ET
566 start = RCQ_BD(fp->rx_comp_cons - 10);
567 end = RCQ_BD(fp->rx_comp_cons + 503);
568 for (j = start; j < end; j++) {
569 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
570
571 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
572 j, cqe[0], cqe[1], cqe[2], cqe[3]);
573 }
574 }
575
49d66772
ET
576 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
577 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 578 " spq_prod_idx(%u)\n",
49d66772 579 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
580 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
581
34f80b04 582 bnx2x_fw_dump(bp);
a2fbb9ea
ET
583 bnx2x_mc_assert(bp);
584 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
585}
586
615f8fd9 587static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 588{
34f80b04 589 int port = BP_PORT(bp);
a2fbb9ea
ET
590 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
591 u32 val = REG_RD(bp, addr);
592 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 593 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
594
595 if (msix) {
8badd27a
EG
596 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
597 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
598 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
599 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
600 } else if (msi) {
601 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
602 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
603 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
605 } else {
606 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 607 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
608 HC_CONFIG_0_REG_INT_LINE_EN_0 |
609 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 610
8badd27a
EG
611 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
612 val, port, addr);
615f8fd9
ET
613
614 REG_WR(bp, addr, val);
615
a2fbb9ea
ET
616 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
617 }
618
8badd27a
EG
619 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
620 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
621
622 REG_WR(bp, addr, val);
34f80b04
EG
623
624 if (CHIP_IS_E1H(bp)) {
625 /* init leading/trailing edge */
626 if (IS_E1HMF(bp)) {
8badd27a 627 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04
EG
628 if (bp->port.pmf)
629 /* enable nig attention */
630 val |= 0x0100;
631 } else
632 val = 0xffff;
633
634 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
635 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
636 }
a2fbb9ea
ET
637}
638
615f8fd9 639static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 640{
34f80b04 641 int port = BP_PORT(bp);
a2fbb9ea
ET
642 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
643 u32 val = REG_RD(bp, addr);
644
645 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_INT_LINE_EN_0 |
648 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
649
650 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
651 val, port, addr);
652
8badd27a
EG
653 /* flush all outstanding writes */
654 mmiowb();
655
a2fbb9ea
ET
656 REG_WR(bp, addr, val);
657 if (REG_RD(bp, addr) != val)
658 BNX2X_ERR("BUG! proper val not read from IGU!\n");
659}
660
f8ef6e44 661static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 662{
a2fbb9ea 663 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 664 int i, offset;
a2fbb9ea 665
34f80b04 666 /* disable interrupt handling */
a2fbb9ea 667 atomic_inc(&bp->intr_sem);
f8ef6e44
YG
668 if (disable_hw)
669 /* prevent the HW from sending interrupts */
670 bnx2x_int_disable(bp);
a2fbb9ea
ET
671
672 /* make sure all ISRs are done */
673 if (msix) {
8badd27a
EG
674 synchronize_irq(bp->msix_table[0].vector);
675 offset = 1;
a2fbb9ea 676 for_each_queue(bp, i)
8badd27a 677 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
678 } else
679 synchronize_irq(bp->pdev->irq);
680
681 /* make sure sp_task is not running */
1cf167f2
EG
682 cancel_delayed_work(&bp->sp_task);
683 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
684}
685
34f80b04 686/* fast path */
a2fbb9ea
ET
687
688/*
34f80b04 689 * General service functions
a2fbb9ea
ET
690 */
691
34f80b04 692static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
693 u8 storm, u16 index, u8 op, u8 update)
694{
5c862848
EG
695 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
696 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
697 struct igu_ack_register igu_ack;
698
699 igu_ack.status_block_index = index;
700 igu_ack.sb_id_and_flags =
34f80b04 701 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
702 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
703 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
704 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
705
5c862848
EG
706 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
707 (*(u32 *)&igu_ack), hc_addr);
708 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
a2fbb9ea
ET
709}
710
711static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
712{
713 struct host_status_block *fpsb = fp->status_blk;
714 u16 rc = 0;
715
716 barrier(); /* status block is written to by the chip */
717 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
718 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
719 rc |= 1;
720 }
721 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
722 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
723 rc |= 2;
724 }
725 return rc;
726}
727
a2fbb9ea
ET
728static u16 bnx2x_ack_int(struct bnx2x *bp)
729{
5c862848
EG
730 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
731 COMMAND_REG_SIMD_MASK);
732 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 733
5c862848
EG
734 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
735 result, hc_addr);
a2fbb9ea 736
a2fbb9ea
ET
737 return result;
738}
739
740
741/*
742 * fast path service functions
743 */
744
237907c1
EG
745static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
746{
747 u16 tx_cons_sb;
748
749 /* Tell compiler that status block fields can change */
750 barrier();
751 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
e8b5fc51
VZ
752 return (fp->tx_pkt_cons != tx_cons_sb);
753}
754
755static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
756{
757 /* Tell compiler that consumer and producer can change */
758 barrier();
759 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
760
237907c1
EG
761}
762
a2fbb9ea
ET
763/* free skb in the packet ring at pos idx
764 * return idx of last bd freed
765 */
766static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
767 u16 idx)
768{
769 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
770 struct eth_tx_bd *tx_bd;
771 struct sk_buff *skb = tx_buf->skb;
34f80b04 772 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
773 int nbd;
774
775 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
776 idx, tx_buf, skb);
777
778 /* unmap first bd */
779 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
780 tx_bd = &fp->tx_desc_ring[bd_idx];
781 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
782 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
783
784 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 785 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
786#ifdef BNX2X_STOP_ON_ERROR
787 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 788 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
789 bnx2x_panic();
790 }
791#endif
792
793 /* Skip a parse bd and the TSO split header bd
794 since they have no mapping */
795 if (nbd)
796 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
797
798 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
799 ETH_TX_BD_FLAGS_TCP_CSUM |
800 ETH_TX_BD_FLAGS_SW_LSO)) {
801 if (--nbd)
802 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
803 tx_bd = &fp->tx_desc_ring[bd_idx];
804 /* is this a TSO split header bd? */
805 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
806 if (--nbd)
807 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
808 }
809 }
810
811 /* now free frags */
812 while (nbd > 0) {
813
814 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
815 tx_bd = &fp->tx_desc_ring[bd_idx];
816 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
817 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
818 if (--nbd)
819 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
820 }
821
822 /* release skb */
53e5e96e 823 WARN_ON(!skb);
a2fbb9ea
ET
824 dev_kfree_skb(skb);
825 tx_buf->first_bd = 0;
826 tx_buf->skb = NULL;
827
34f80b04 828 return new_cons;
a2fbb9ea
ET
829}
830
34f80b04 831static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 832{
34f80b04
EG
833 s16 used;
834 u16 prod;
835 u16 cons;
a2fbb9ea 836
34f80b04 837 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
838 prod = fp->tx_bd_prod;
839 cons = fp->tx_bd_cons;
840
34f80b04
EG
841 /* NUM_TX_RINGS = number of "next-page" entries
842 It will be used as a threshold */
843 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 844
34f80b04 845#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
846 WARN_ON(used < 0);
847 WARN_ON(used > fp->bp->tx_ring_size);
848 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 849#endif
a2fbb9ea 850
34f80b04 851 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
852}
853
854static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
855{
856 struct bnx2x *bp = fp->bp;
555f6c78 857 struct netdev_queue *txq;
a2fbb9ea
ET
858 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
859 int done = 0;
860
861#ifdef BNX2X_STOP_ON_ERROR
862 if (unlikely(bp->panic))
863 return;
864#endif
865
555f6c78 866 txq = netdev_get_tx_queue(bp->dev, fp->index);
a2fbb9ea
ET
867 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
868 sw_cons = fp->tx_pkt_cons;
869
870 while (sw_cons != hw_cons) {
871 u16 pkt_cons;
872
873 pkt_cons = TX_BD(sw_cons);
874
875 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
876
34f80b04 877 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
878 hw_cons, sw_cons, pkt_cons);
879
34f80b04 880/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
881 rmb();
882 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
883 }
884*/
885 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
886 sw_cons++;
887 done++;
888
889 if (done == work)
890 break;
891 }
892
893 fp->tx_pkt_cons = sw_cons;
894 fp->tx_bd_cons = bd_cons;
895
555f6c78
EG
896 /* Need to make the tx_bd_cons update visible to start_xmit()
897 * before checking for netif_tx_queue_stopped(). Without the
a2fbb9ea
ET
898 * memory barrier, there is a small possibility that start_xmit()
899 * will miss it and cause the queue to be stopped forever.
900 */
901 smp_mb();
902
903 /* TBD need a thresh? */
555f6c78 904 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 905
555f6c78 906 __netif_tx_lock(txq, smp_processor_id());
a2fbb9ea 907
555f6c78 908 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 909 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 910 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 911 netif_tx_wake_queue(txq);
a2fbb9ea 912
555f6c78 913 __netif_tx_unlock(txq);
a2fbb9ea
ET
914 }
915}
916
3196a88a 917
a2fbb9ea
ET
918static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
919 union eth_rx_cqe *rr_cqe)
920{
921 struct bnx2x *bp = fp->bp;
922 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
923 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
924
34f80b04 925 DP(BNX2X_MSG_SP,
a2fbb9ea 926 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
927 FP_IDX(fp), cid, command, bp->state,
928 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
929
930 bp->spq_left++;
931
34f80b04 932 if (FP_IDX(fp)) {
a2fbb9ea
ET
933 switch (command | fp->state) {
934 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
935 BNX2X_FP_STATE_OPENING):
936 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
937 cid);
938 fp->state = BNX2X_FP_STATE_OPEN;
939 break;
940
941 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
942 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
943 cid);
944 fp->state = BNX2X_FP_STATE_HALTED;
945 break;
946
947 default:
34f80b04
EG
948 BNX2X_ERR("unexpected MC reply (%d) "
949 "fp->state is %x\n", command, fp->state);
950 break;
a2fbb9ea 951 }
34f80b04 952 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
953 return;
954 }
c14423fe 955
a2fbb9ea
ET
956 switch (command | bp->state) {
957 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
958 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
959 bp->state = BNX2X_STATE_OPEN;
960 break;
961
962 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
963 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
964 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
965 fp->state = BNX2X_FP_STATE_HALTED;
966 break;
967
a2fbb9ea 968 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 969 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 970 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
971 break;
972
3196a88a 973
a2fbb9ea 974 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 975 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 976 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 977 bp->set_mac_pending = 0;
a2fbb9ea
ET
978 break;
979
49d66772 980 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 981 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
982 break;
983
a2fbb9ea 984 default:
34f80b04 985 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 986 command, bp->state);
34f80b04 987 break;
a2fbb9ea 988 }
34f80b04 989 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
990}
991
7a9b2557
VZ
992static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
993 struct bnx2x_fastpath *fp, u16 index)
994{
995 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
996 struct page *page = sw_buf->page;
997 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
998
999 /* Skip "next page" elements */
1000 if (!page)
1001 return;
1002
1003 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1004 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1005 __free_pages(page, PAGES_PER_SGE_SHIFT);
1006
1007 sw_buf->page = NULL;
1008 sge->addr_hi = 0;
1009 sge->addr_lo = 0;
1010}
1011
1012static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1013 struct bnx2x_fastpath *fp, int last)
1014{
1015 int i;
1016
1017 for (i = 0; i < last; i++)
1018 bnx2x_free_rx_sge(bp, fp, i);
1019}
1020
1021static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1022 struct bnx2x_fastpath *fp, u16 index)
1023{
1024 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1025 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1026 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1027 dma_addr_t mapping;
1028
1029 if (unlikely(page == NULL))
1030 return -ENOMEM;
1031
4f40f2cb 1032 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1033 PCI_DMA_FROMDEVICE);
8d8bb39b 1034 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1035 __free_pages(page, PAGES_PER_SGE_SHIFT);
1036 return -ENOMEM;
1037 }
1038
1039 sw_buf->page = page;
1040 pci_unmap_addr_set(sw_buf, mapping, mapping);
1041
1042 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1043 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1044
1045 return 0;
1046}
1047
a2fbb9ea
ET
1048static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1049 struct bnx2x_fastpath *fp, u16 index)
1050{
1051 struct sk_buff *skb;
1052 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1053 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1054 dma_addr_t mapping;
1055
1056 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1057 if (unlikely(skb == NULL))
1058 return -ENOMEM;
1059
437cf2f1 1060 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1061 PCI_DMA_FROMDEVICE);
8d8bb39b 1062 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1063 dev_kfree_skb(skb);
1064 return -ENOMEM;
1065 }
1066
1067 rx_buf->skb = skb;
1068 pci_unmap_addr_set(rx_buf, mapping, mapping);
1069
1070 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1071 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1072
1073 return 0;
1074}
1075
1076/* note that we are not allocating a new skb,
1077 * we are just moving one from cons to prod
1078 * we are not creating a new mapping,
1079 * so there is no need to check for dma_mapping_error().
1080 */
1081static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1082 struct sk_buff *skb, u16 cons, u16 prod)
1083{
1084 struct bnx2x *bp = fp->bp;
1085 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1086 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1087 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1088 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1089
1090 pci_dma_sync_single_for_device(bp->pdev,
1091 pci_unmap_addr(cons_rx_buf, mapping),
1092 bp->rx_offset + RX_COPY_THRESH,
1093 PCI_DMA_FROMDEVICE);
1094
1095 prod_rx_buf->skb = cons_rx_buf->skb;
1096 pci_unmap_addr_set(prod_rx_buf, mapping,
1097 pci_unmap_addr(cons_rx_buf, mapping));
1098 *prod_bd = *cons_bd;
1099}
1100
7a9b2557
VZ
1101static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1102 u16 idx)
1103{
1104 u16 last_max = fp->last_max_sge;
1105
1106 if (SUB_S16(idx, last_max) > 0)
1107 fp->last_max_sge = idx;
1108}
1109
1110static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1111{
1112 int i, j;
1113
1114 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1115 int idx = RX_SGE_CNT * i - 1;
1116
1117 for (j = 0; j < 2; j++) {
1118 SGE_MASK_CLEAR_BIT(fp, idx);
1119 idx--;
1120 }
1121 }
1122}
1123
1124static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1125 struct eth_fast_path_rx_cqe *fp_cqe)
1126{
1127 struct bnx2x *bp = fp->bp;
4f40f2cb 1128 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1129 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1130 SGE_PAGE_SHIFT;
7a9b2557
VZ
1131 u16 last_max, last_elem, first_elem;
1132 u16 delta = 0;
1133 u16 i;
1134
1135 if (!sge_len)
1136 return;
1137
1138 /* First mark all used pages */
1139 for (i = 0; i < sge_len; i++)
1140 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1141
1142 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1143 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1144
1145 /* Here we assume that the last SGE index is the biggest */
1146 prefetch((void *)(fp->sge_mask));
1147 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1148
1149 last_max = RX_SGE(fp->last_max_sge);
1150 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1151 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1152
1153 /* If ring is not full */
1154 if (last_elem + 1 != first_elem)
1155 last_elem++;
1156
1157 /* Now update the prod */
1158 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1159 if (likely(fp->sge_mask[i]))
1160 break;
1161
1162 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1163 delta += RX_SGE_MASK_ELEM_SZ;
1164 }
1165
1166 if (delta > 0) {
1167 fp->rx_sge_prod += delta;
1168 /* clear page-end entries */
1169 bnx2x_clear_sge_mask_next_elems(fp);
1170 }
1171
1172 DP(NETIF_MSG_RX_STATUS,
1173 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1174 fp->last_max_sge, fp->rx_sge_prod);
1175}
1176
1177static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1178{
1179 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1180 memset(fp->sge_mask, 0xff,
1181 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1182
33471629
EG
1183 /* Clear the two last indices in the page to 1:
1184 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1185 hence will never be indicated and should be removed from
1186 the calculations. */
1187 bnx2x_clear_sge_mask_next_elems(fp);
1188}
1189
1190static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1191 struct sk_buff *skb, u16 cons, u16 prod)
1192{
1193 struct bnx2x *bp = fp->bp;
1194 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1195 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1196 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1197 dma_addr_t mapping;
1198
1199 /* move empty skb from pool to prod and map it */
1200 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1201 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1202 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1203 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1204
1205 /* move partial skb from cons to pool (don't unmap yet) */
1206 fp->tpa_pool[queue] = *cons_rx_buf;
1207
1208 /* mark bin state as start - print error if current state != stop */
1209 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1210 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1211
1212 fp->tpa_state[queue] = BNX2X_TPA_START;
1213
1214 /* point prod_bd to new skb */
1215 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1216 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1217
1218#ifdef BNX2X_STOP_ON_ERROR
1219 fp->tpa_queue_used |= (1 << queue);
1220#ifdef __powerpc64__
1221 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1222#else
1223 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1224#endif
1225 fp->tpa_queue_used);
1226#endif
1227}
1228
1229static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1230 struct sk_buff *skb,
1231 struct eth_fast_path_rx_cqe *fp_cqe,
1232 u16 cqe_idx)
1233{
1234 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1235 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1236 u32 i, frag_len, frag_size, pages;
1237 int err;
1238 int j;
1239
1240 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1241 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1242
1243 /* This is needed in order to enable forwarding support */
1244 if (frag_size)
4f40f2cb 1245 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1246 max(frag_size, (u32)len_on_bd));
1247
1248#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1249 if (pages >
1250 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1251 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1252 pages, cqe_idx);
1253 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1254 fp_cqe->pkt_len, len_on_bd);
1255 bnx2x_panic();
1256 return -EINVAL;
1257 }
1258#endif
1259
1260 /* Run through the SGL and compose the fragmented skb */
1261 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1262 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1263
1264 /* FW gives the indices of the SGE as if the ring is an array
1265 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1266 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1267 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1268 old_rx_pg = *rx_pg;
1269
1270 /* If we fail to allocate a substitute page, we simply stop
1271 where we are and drop the whole packet */
1272 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1273 if (unlikely(err)) {
66e855f3 1274 bp->eth_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1275 return err;
1276 }
1277
1278 /* Unmap the page as we r going to pass it to the stack */
1279 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1280 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1281
1282 /* Add one frag and update the appropriate fields in the skb */
1283 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1284
1285 skb->data_len += frag_len;
1286 skb->truesize += frag_len;
1287 skb->len += frag_len;
1288
1289 frag_size -= frag_len;
1290 }
1291
1292 return 0;
1293}
1294
1295static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1296 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1297 u16 cqe_idx)
1298{
1299 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1300 struct sk_buff *skb = rx_buf->skb;
1301 /* alloc new skb */
1302 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1303
1304 /* Unmap skb in the pool anyway, as we are going to change
1305 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1306 fails. */
1307 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1308 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1309
7a9b2557 1310 if (likely(new_skb)) {
66e855f3
YG
1311 /* fix ip xsum and give it to the stack */
1312 /* (no need to map the new skb) */
0c6671b0
EG
1313#ifdef BCM_VLAN
1314 int is_vlan_cqe =
1315 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1316 PARSING_FLAGS_VLAN);
1317 int is_not_hwaccel_vlan_cqe =
1318 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1319#endif
7a9b2557
VZ
1320
1321 prefetch(skb);
1322 prefetch(((char *)(skb)) + 128);
1323
7a9b2557
VZ
1324#ifdef BNX2X_STOP_ON_ERROR
1325 if (pad + len > bp->rx_buf_size) {
1326 BNX2X_ERR("skb_put is about to fail... "
1327 "pad %d len %d rx_buf_size %d\n",
1328 pad, len, bp->rx_buf_size);
1329 bnx2x_panic();
1330 return;
1331 }
1332#endif
1333
1334 skb_reserve(skb, pad);
1335 skb_put(skb, len);
1336
1337 skb->protocol = eth_type_trans(skb, bp->dev);
1338 skb->ip_summed = CHECKSUM_UNNECESSARY;
0c8dfc83 1339 skb_record_rx_queue(skb, queue);
7a9b2557
VZ
1340
1341 {
1342 struct iphdr *iph;
1343
1344 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1345#ifdef BCM_VLAN
1346 /* If there is no Rx VLAN offloading -
1347 take VLAN tag into an account */
1348 if (unlikely(is_not_hwaccel_vlan_cqe))
1349 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1350#endif
7a9b2557
VZ
1351 iph->check = 0;
1352 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1353 }
1354
1355 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1356 &cqe->fast_path_cqe, cqe_idx)) {
1357#ifdef BCM_VLAN
0c6671b0
EG
1358 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1359 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1360 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1361 le16_to_cpu(cqe->fast_path_cqe.
1362 vlan_tag));
1363 else
1364#endif
1365 netif_receive_skb(skb);
1366 } else {
1367 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1368 " - dropping packet!\n");
1369 dev_kfree_skb(skb);
1370 }
1371
7a9b2557
VZ
1372
1373 /* put new skb in bin */
1374 fp->tpa_pool[queue].skb = new_skb;
1375
1376 } else {
66e855f3 1377 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1378 DP(NETIF_MSG_RX_STATUS,
1379 "Failed to allocate new skb - dropping packet!\n");
66e855f3 1380 bp->eth_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1381 }
1382
1383 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1384}
1385
1386static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1387 struct bnx2x_fastpath *fp,
1388 u16 bd_prod, u16 rx_comp_prod,
1389 u16 rx_sge_prod)
1390{
8d9c5f34 1391 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1392 int i;
1393
1394 /* Update producers */
1395 rx_prods.bd_prod = bd_prod;
1396 rx_prods.cqe_prod = rx_comp_prod;
1397 rx_prods.sge_prod = rx_sge_prod;
1398
58f4c4cf
EG
1399 /*
1400 * Make sure that the BD and SGE data is updated before updating the
1401 * producers since FW might read the BD/SGE right after the producer
1402 * is updated.
1403 * This is only applicable for weak-ordered memory model archs such
1404 * as IA-64. The following barrier is also mandatory since FW will
1405 * assumes BDs must have buffers.
1406 */
1407 wmb();
1408
8d9c5f34
EG
1409 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1410 REG_WR(bp, BAR_USTRORM_INTMEM +
1411 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
7a9b2557
VZ
1412 ((u32 *)&rx_prods)[i]);
1413
58f4c4cf
EG
1414 mmiowb(); /* keep prod updates ordered */
1415
7a9b2557 1416 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1417 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1418 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1419}
1420
a2fbb9ea
ET
1421static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1422{
1423 struct bnx2x *bp = fp->bp;
34f80b04 1424 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1425 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1426 int rx_pkt = 0;
1427
1428#ifdef BNX2X_STOP_ON_ERROR
1429 if (unlikely(bp->panic))
1430 return 0;
1431#endif
1432
34f80b04
EG
1433 /* CQ "next element" is of the size of the regular element,
1434 that's why it's ok here */
a2fbb9ea
ET
1435 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1436 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1437 hw_comp_cons++;
1438
1439 bd_cons = fp->rx_bd_cons;
1440 bd_prod = fp->rx_bd_prod;
34f80b04 1441 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1442 sw_comp_cons = fp->rx_comp_cons;
1443 sw_comp_prod = fp->rx_comp_prod;
1444
1445 /* Memory barrier necessary as speculative reads of the rx
1446 * buffer can be ahead of the index in the status block
1447 */
1448 rmb();
1449
1450 DP(NETIF_MSG_RX_STATUS,
1451 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
34f80b04 1452 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1453
1454 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1455 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1456 struct sk_buff *skb;
1457 union eth_rx_cqe *cqe;
34f80b04
EG
1458 u8 cqe_fp_flags;
1459 u16 len, pad;
a2fbb9ea
ET
1460
1461 comp_ring_cons = RCQ_BD(sw_comp_cons);
1462 bd_prod = RX_BD(bd_prod);
1463 bd_cons = RX_BD(bd_cons);
1464
1465 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1466 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1467
a2fbb9ea 1468 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1469 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1470 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1471 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1472 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1473 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1474
1475 /* is this a slowpath msg? */
34f80b04 1476 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1477 bnx2x_sp_event(fp, cqe);
1478 goto next_cqe;
1479
1480 /* this is an rx packet */
1481 } else {
1482 rx_buf = &fp->rx_buf_ring[bd_cons];
1483 skb = rx_buf->skb;
a2fbb9ea
ET
1484 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1485 pad = cqe->fast_path_cqe.placement_offset;
1486
7a9b2557
VZ
1487 /* If CQE is marked both TPA_START and TPA_END
1488 it is a non-TPA CQE */
1489 if ((!fp->disable_tpa) &&
1490 (TPA_TYPE(cqe_fp_flags) !=
1491 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1492 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1493
1494 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1495 DP(NETIF_MSG_RX_STATUS,
1496 "calling tpa_start on queue %d\n",
1497 queue);
1498
1499 bnx2x_tpa_start(fp, queue, skb,
1500 bd_cons, bd_prod);
1501 goto next_rx;
1502 }
1503
1504 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1505 DP(NETIF_MSG_RX_STATUS,
1506 "calling tpa_stop on queue %d\n",
1507 queue);
1508
1509 if (!BNX2X_RX_SUM_FIX(cqe))
1510 BNX2X_ERR("STOP on none TCP "
1511 "data\n");
1512
1513 /* This is a size of the linear data
1514 on this skb */
1515 len = le16_to_cpu(cqe->fast_path_cqe.
1516 len_on_bd);
1517 bnx2x_tpa_stop(bp, fp, queue, pad,
1518 len, cqe, comp_ring_cons);
1519#ifdef BNX2X_STOP_ON_ERROR
1520 if (bp->panic)
1521 return -EINVAL;
1522#endif
1523
1524 bnx2x_update_sge_prod(fp,
1525 &cqe->fast_path_cqe);
1526 goto next_cqe;
1527 }
1528 }
1529
a2fbb9ea
ET
1530 pci_dma_sync_single_for_device(bp->pdev,
1531 pci_unmap_addr(rx_buf, mapping),
1532 pad + RX_COPY_THRESH,
1533 PCI_DMA_FROMDEVICE);
1534 prefetch(skb);
1535 prefetch(((char *)(skb)) + 128);
1536
1537 /* is this an error packet? */
34f80b04 1538 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1539 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1540 "ERROR flags %x rx packet %u\n",
1541 cqe_fp_flags, sw_comp_cons);
66e855f3 1542 bp->eth_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1543 goto reuse_rx;
1544 }
1545
1546 /* Since we don't have a jumbo ring
1547 * copy small packets if mtu > 1500
1548 */
1549 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1550 (len <= RX_COPY_THRESH)) {
1551 struct sk_buff *new_skb;
1552
1553 new_skb = netdev_alloc_skb(bp->dev,
1554 len + pad);
1555 if (new_skb == NULL) {
1556 DP(NETIF_MSG_RX_ERR,
34f80b04 1557 "ERROR packet dropped "
a2fbb9ea 1558 "because of alloc failure\n");
66e855f3 1559 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1560 goto reuse_rx;
1561 }
1562
1563 /* aligned copy */
1564 skb_copy_from_linear_data_offset(skb, pad,
1565 new_skb->data + pad, len);
1566 skb_reserve(new_skb, pad);
1567 skb_put(new_skb, len);
1568
1569 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1570
1571 skb = new_skb;
1572
1573 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1574 pci_unmap_single(bp->pdev,
1575 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1576 bp->rx_buf_size,
a2fbb9ea
ET
1577 PCI_DMA_FROMDEVICE);
1578 skb_reserve(skb, pad);
1579 skb_put(skb, len);
1580
1581 } else {
1582 DP(NETIF_MSG_RX_ERR,
34f80b04 1583 "ERROR packet dropped because "
a2fbb9ea 1584 "of alloc failure\n");
66e855f3 1585 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1586reuse_rx:
1587 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1588 goto next_rx;
1589 }
1590
1591 skb->protocol = eth_type_trans(skb, bp->dev);
1592
1593 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1594 if (bp->rx_csum) {
1adcd8be
EG
1595 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1596 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3
YG
1597 else
1598 bp->eth_stats.hw_csum_err++;
1599 }
a2fbb9ea
ET
1600 }
1601
1602#ifdef BCM_VLAN
0c6671b0 1603 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1604 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1605 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1606 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1607 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1608 else
1609#endif
34f80b04 1610 netif_receive_skb(skb);
a2fbb9ea 1611
a2fbb9ea
ET
1612
1613next_rx:
1614 rx_buf->skb = NULL;
1615
1616 bd_cons = NEXT_RX_IDX(bd_cons);
1617 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1618 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1619 rx_pkt++;
a2fbb9ea
ET
1620next_cqe:
1621 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1622 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1623
34f80b04 1624 if (rx_pkt == budget)
a2fbb9ea
ET
1625 break;
1626 } /* while */
1627
1628 fp->rx_bd_cons = bd_cons;
34f80b04 1629 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1630 fp->rx_comp_cons = sw_comp_cons;
1631 fp->rx_comp_prod = sw_comp_prod;
1632
7a9b2557
VZ
1633 /* Update producers */
1634 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1635 fp->rx_sge_prod);
a2fbb9ea
ET
1636
1637 fp->rx_pkt += rx_pkt;
1638 fp->rx_calls++;
1639
1640 return rx_pkt;
1641}
1642
1643static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1644{
1645 struct bnx2x_fastpath *fp = fp_cookie;
1646 struct bnx2x *bp = fp->bp;
34f80b04 1647 int index = FP_IDX(fp);
a2fbb9ea 1648
da5a662a
VZ
1649 /* Return here if interrupt is disabled */
1650 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1651 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1652 return IRQ_HANDLED;
1653 }
1654
34f80b04
EG
1655 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1656 index, FP_SB_ID(fp));
1657 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1658
1659#ifdef BNX2X_STOP_ON_ERROR
1660 if (unlikely(bp->panic))
1661 return IRQ_HANDLED;
1662#endif
1663
1664 prefetch(fp->rx_cons_sb);
1665 prefetch(fp->tx_cons_sb);
1666 prefetch(&fp->status_blk->c_status_block.status_block_index);
1667 prefetch(&fp->status_blk->u_status_block.status_block_index);
1668
288379f0 1669 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1670
a2fbb9ea
ET
1671 return IRQ_HANDLED;
1672}
1673
1674static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1675{
555f6c78 1676 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1677 u16 status = bnx2x_ack_int(bp);
34f80b04 1678 u16 mask;
a2fbb9ea 1679
34f80b04 1680 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1681 if (unlikely(status == 0)) {
1682 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1683 return IRQ_NONE;
1684 }
34f80b04 1685 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1686
34f80b04 1687 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1688 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1689 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1690 return IRQ_HANDLED;
1691 }
1692
3196a88a
EG
1693#ifdef BNX2X_STOP_ON_ERROR
1694 if (unlikely(bp->panic))
1695 return IRQ_HANDLED;
1696#endif
1697
34f80b04
EG
1698 mask = 0x2 << bp->fp[0].sb_id;
1699 if (status & mask) {
a2fbb9ea
ET
1700 struct bnx2x_fastpath *fp = &bp->fp[0];
1701
1702 prefetch(fp->rx_cons_sb);
1703 prefetch(fp->tx_cons_sb);
1704 prefetch(&fp->status_blk->c_status_block.status_block_index);
1705 prefetch(&fp->status_blk->u_status_block.status_block_index);
1706
288379f0 1707 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1708
34f80b04 1709 status &= ~mask;
a2fbb9ea
ET
1710 }
1711
a2fbb9ea 1712
34f80b04 1713 if (unlikely(status & 0x1)) {
1cf167f2 1714 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1715
1716 status &= ~0x1;
1717 if (!status)
1718 return IRQ_HANDLED;
1719 }
1720
34f80b04
EG
1721 if (status)
1722 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1723 status);
a2fbb9ea 1724
c18487ee 1725 return IRQ_HANDLED;
a2fbb9ea
ET
1726}
1727
c18487ee 1728/* end of fast path */
a2fbb9ea 1729
bb2a0f7a 1730static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1731
c18487ee
YR
1732/* Link */
1733
1734/*
1735 * General service functions
1736 */
a2fbb9ea 1737
4a37fb66 1738static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1739{
1740 u32 lock_status;
1741 u32 resource_bit = (1 << resource);
4a37fb66
YG
1742 int func = BP_FUNC(bp);
1743 u32 hw_lock_control_reg;
c18487ee 1744 int cnt;
a2fbb9ea 1745
c18487ee
YR
1746 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 DP(NETIF_MSG_HW,
1749 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751 return -EINVAL;
1752 }
a2fbb9ea 1753
4a37fb66
YG
1754 if (func <= 5) {
1755 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756 } else {
1757 hw_lock_control_reg =
1758 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1759 }
1760
c18487ee 1761 /* Validating that the resource is not already taken */
4a37fb66 1762 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1763 if (lock_status & resource_bit) {
1764 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1765 lock_status, resource_bit);
1766 return -EEXIST;
1767 }
a2fbb9ea 1768
46230476
EG
1769 /* Try for 5 second every 5ms */
1770 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1771 /* Try to acquire the lock */
4a37fb66
YG
1772 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1773 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1774 if (lock_status & resource_bit)
1775 return 0;
a2fbb9ea 1776
c18487ee 1777 msleep(5);
a2fbb9ea 1778 }
c18487ee
YR
1779 DP(NETIF_MSG_HW, "Timeout\n");
1780 return -EAGAIN;
1781}
a2fbb9ea 1782
4a37fb66 1783static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1784{
1785 u32 lock_status;
1786 u32 resource_bit = (1 << resource);
4a37fb66
YG
1787 int func = BP_FUNC(bp);
1788 u32 hw_lock_control_reg;
a2fbb9ea 1789
c18487ee
YR
1790 /* Validating that the resource is within range */
1791 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792 DP(NETIF_MSG_HW,
1793 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1794 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1795 return -EINVAL;
1796 }
1797
4a37fb66
YG
1798 if (func <= 5) {
1799 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800 } else {
1801 hw_lock_control_reg =
1802 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1803 }
1804
c18487ee 1805 /* Validating that the resource is currently taken */
4a37fb66 1806 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1807 if (!(lock_status & resource_bit)) {
1808 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1809 lock_status, resource_bit);
1810 return -EFAULT;
a2fbb9ea
ET
1811 }
1812
4a37fb66 1813 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1814 return 0;
1815}
1816
1817/* HW Lock for shared dual port PHYs */
4a37fb66 1818static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1819{
1820 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1821
34f80b04 1822 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1823
c18487ee
YR
1824 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1825 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1827}
a2fbb9ea 1828
4a37fb66 1829static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1830{
1831 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1832
c18487ee
YR
1833 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1834 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1836
34f80b04 1837 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1838}
a2fbb9ea 1839
17de50b7 1840int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1841{
1842 /* The GPIO should be swapped if swap register is set and active */
1843 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1844 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1845 int gpio_shift = gpio_num +
1846 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1847 u32 gpio_mask = (1 << gpio_shift);
1848 u32 gpio_reg;
a2fbb9ea 1849
c18487ee
YR
1850 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1852 return -EINVAL;
1853 }
a2fbb9ea 1854
4a37fb66 1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1856 /* read GPIO and mask except the float bits */
1857 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1858
c18487ee
YR
1859 switch (mode) {
1860 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1862 gpio_num, gpio_shift);
1863 /* clear FLOAT and set CLR */
1864 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1865 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1866 break;
a2fbb9ea 1867
c18487ee
YR
1868 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1869 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1870 gpio_num, gpio_shift);
1871 /* clear FLOAT and set SET */
1872 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1873 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1874 break;
a2fbb9ea 1875
17de50b7 1876 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1877 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1878 gpio_num, gpio_shift);
1879 /* set FLOAT */
1880 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1881 break;
a2fbb9ea 1882
c18487ee
YR
1883 default:
1884 break;
a2fbb9ea
ET
1885 }
1886
c18487ee 1887 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1889
c18487ee 1890 return 0;
a2fbb9ea
ET
1891}
1892
c18487ee 1893static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1894{
c18487ee
YR
1895 u32 spio_mask = (1 << spio_num);
1896 u32 spio_reg;
a2fbb9ea 1897
c18487ee
YR
1898 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1899 (spio_num > MISC_REGISTERS_SPIO_7)) {
1900 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1901 return -EINVAL;
a2fbb9ea
ET
1902 }
1903
4a37fb66 1904 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1905 /* read SPIO and mask except the float bits */
1906 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1907
c18487ee 1908 switch (mode) {
6378c025 1909 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1910 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1911 /* clear FLOAT and set CLR */
1912 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1913 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1914 break;
a2fbb9ea 1915
6378c025 1916 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1917 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1918 /* clear FLOAT and set SET */
1919 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1920 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1921 break;
a2fbb9ea 1922
c18487ee
YR
1923 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1924 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1925 /* set FLOAT */
1926 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1927 break;
a2fbb9ea 1928
c18487ee
YR
1929 default:
1930 break;
a2fbb9ea
ET
1931 }
1932
c18487ee 1933 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1935
a2fbb9ea
ET
1936 return 0;
1937}
1938
c18487ee 1939static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1940{
ad33ea3a
EG
1941 switch (bp->link_vars.ieee_fc &
1942 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1943 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1944 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1945 ADVERTISED_Pause);
1946 break;
1947 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1948 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1949 ADVERTISED_Pause);
1950 break;
1951 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1952 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1953 break;
1954 default:
34f80b04 1955 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1956 ADVERTISED_Pause);
1957 break;
1958 }
1959}
f1410647 1960
c18487ee
YR
1961static void bnx2x_link_report(struct bnx2x *bp)
1962{
1963 if (bp->link_vars.link_up) {
1964 if (bp->state == BNX2X_STATE_OPEN)
1965 netif_carrier_on(bp->dev);
1966 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1967
c18487ee 1968 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1969
c18487ee
YR
1970 if (bp->link_vars.duplex == DUPLEX_FULL)
1971 printk("full duplex");
1972 else
1973 printk("half duplex");
f1410647 1974
c0700f90
DM
1975 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1976 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1977 printk(", receive ");
c0700f90 1978 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1979 printk("& transmit ");
1980 } else {
1981 printk(", transmit ");
1982 }
1983 printk("flow control ON");
1984 }
1985 printk("\n");
f1410647 1986
c18487ee
YR
1987 } else { /* link_down */
1988 netif_carrier_off(bp->dev);
1989 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1990 }
c18487ee
YR
1991}
1992
1993static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1994{
19680c48
EG
1995 if (!BP_NOMCP(bp)) {
1996 u8 rc;
a2fbb9ea 1997
19680c48 1998 /* Initialize link parameters structure variables */
8c99e7b0
YR
1999 /* It is recommended to turn off RX FC for jumbo frames
2000 for better performance */
2001 if (IS_E1HMF(bp))
c0700f90 2002 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2003 else if (bp->dev->mtu > 5000)
c0700f90 2004 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2005 else
c0700f90 2006 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2007
4a37fb66 2008 bnx2x_acquire_phy_lock(bp);
19680c48 2009 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2010 bnx2x_release_phy_lock(bp);
a2fbb9ea 2011
3c96c68b
EG
2012 bnx2x_calc_fc_adv(bp);
2013
19680c48
EG
2014 if (bp->link_vars.link_up)
2015 bnx2x_link_report(bp);
a2fbb9ea 2016
34f80b04 2017
19680c48
EG
2018 return rc;
2019 }
2020 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2021 return -EINVAL;
a2fbb9ea
ET
2022}
2023
c18487ee 2024static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2025{
19680c48 2026 if (!BP_NOMCP(bp)) {
4a37fb66 2027 bnx2x_acquire_phy_lock(bp);
19680c48 2028 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2029 bnx2x_release_phy_lock(bp);
a2fbb9ea 2030
19680c48
EG
2031 bnx2x_calc_fc_adv(bp);
2032 } else
2033 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 2034}
a2fbb9ea 2035
c18487ee
YR
2036static void bnx2x__link_reset(struct bnx2x *bp)
2037{
19680c48 2038 if (!BP_NOMCP(bp)) {
4a37fb66 2039 bnx2x_acquire_phy_lock(bp);
19680c48 2040 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2041 bnx2x_release_phy_lock(bp);
19680c48
EG
2042 } else
2043 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 2044}
a2fbb9ea 2045
c18487ee
YR
2046static u8 bnx2x_link_test(struct bnx2x *bp)
2047{
2048 u8 rc;
a2fbb9ea 2049
4a37fb66 2050 bnx2x_acquire_phy_lock(bp);
c18487ee 2051 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2052 bnx2x_release_phy_lock(bp);
a2fbb9ea 2053
c18487ee
YR
2054 return rc;
2055}
a2fbb9ea 2056
34f80b04
EG
2057/* Calculates the sum of vn_min_rates.
2058 It's needed for further normalizing of the min_rates.
2059
2060 Returns:
2061 sum of vn_min_rates
2062 or
2063 0 - if all the min_rates are 0.
33471629 2064 In the later case fairness algorithm should be deactivated.
34f80b04
EG
2065 If not all min_rates are zero then those that are zeroes will
2066 be set to 1.
2067 */
2068static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2069{
2070 int i, port = BP_PORT(bp);
2071 u32 wsum = 0;
2072 int all_zero = 1;
2073
2074 for (i = 0; i < E1HVN_MAX; i++) {
2075 u32 vn_cfg =
2076 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2077 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2078 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2079 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2080 /* If min rate is zero - set it to 1 */
2081 if (!vn_min_rate)
2082 vn_min_rate = DEF_MIN_RATE;
2083 else
2084 all_zero = 0;
2085
2086 wsum += vn_min_rate;
2087 }
2088 }
2089
2090 /* ... only if all min rates are zeros - disable FAIRNESS */
2091 if (all_zero)
2092 return 0;
2093
2094 return wsum;
2095}
2096
2097static void bnx2x_init_port_minmax(struct bnx2x *bp,
2098 int en_fness,
2099 u16 port_rate,
2100 struct cmng_struct_per_port *m_cmng_port)
2101{
2102 u32 r_param = port_rate / 8;
2103 int port = BP_PORT(bp);
2104 int i;
2105
2106 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2107
2108 /* Enable minmax only if we are in e1hmf mode */
2109 if (IS_E1HMF(bp)) {
2110 u32 fair_periodic_timeout_usec;
2111 u32 t_fair;
2112
2113 /* Enable rate shaping and fairness */
2114 m_cmng_port->flags.cmng_vn_enable = 1;
2115 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2116 m_cmng_port->flags.rate_shaping_enable = 1;
2117
2118 if (!en_fness)
2119 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2120 " fairness will be disabled\n");
2121
2122 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2123 m_cmng_port->rs_vars.rs_periodic_timeout =
2124 RS_PERIODIC_TIMEOUT_USEC / 4;
2125
2126 /* this is the threshold below which no timer arming will occur
2127 1.25 coefficient is for the threshold to be a little bigger
2128 than the real time, to compensate for timer in-accuracy */
2129 m_cmng_port->rs_vars.rs_threshold =
2130 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2131
2132 /* resolution of fairness timer */
2133 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2134 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2135 t_fair = T_FAIR_COEF / port_rate;
2136
2137 /* this is the threshold below which we won't arm
2138 the timer anymore */
2139 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2140
2141 /* we multiply by 1e3/8 to get bytes/msec.
2142 We don't want the credits to pass a credit
2143 of the T_FAIR*FAIR_MEM (algorithm resolution) */
2144 m_cmng_port->fair_vars.upper_bound =
2145 r_param * t_fair * FAIR_MEM;
2146 /* since each tick is 4 usec */
2147 m_cmng_port->fair_vars.fairness_timeout =
2148 fair_periodic_timeout_usec / 4;
2149
2150 } else {
2151 /* Disable rate shaping and fairness */
2152 m_cmng_port->flags.cmng_vn_enable = 0;
2153 m_cmng_port->flags.fairness_enable = 0;
2154 m_cmng_port->flags.rate_shaping_enable = 0;
2155
2156 DP(NETIF_MSG_IFUP,
2157 "Single function mode minmax will be disabled\n");
2158 }
2159
2160 /* Store it to internal memory */
2161 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2162 REG_WR(bp, BAR_XSTRORM_INTMEM +
2163 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2164 ((u32 *)(m_cmng_port))[i]);
2165}
2166
2167static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2168 u32 wsum, u16 port_rate,
2169 struct cmng_struct_per_port *m_cmng_port)
2170{
2171 struct rate_shaping_vars_per_vn m_rs_vn;
2172 struct fairness_vars_per_vn m_fair_vn;
2173 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2174 u16 vn_min_rate, vn_max_rate;
2175 int i;
2176
2177 /* If function is hidden - set min and max to zeroes */
2178 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2179 vn_min_rate = 0;
2180 vn_max_rate = 0;
2181
2182 } else {
2183 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2184 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2185 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2186 if current min rate is zero - set it to 1.
33471629 2187 This is a requirement of the algorithm. */
34f80b04
EG
2188 if ((vn_min_rate == 0) && wsum)
2189 vn_min_rate = DEF_MIN_RATE;
2190 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2191 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2192 }
2193
2194 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2195 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2196
2197 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2198 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2199
2200 /* global vn counter - maximal Mbps for this vn */
2201 m_rs_vn.vn_counter.rate = vn_max_rate;
2202
2203 /* quota - number of bytes transmitted in this period */
2204 m_rs_vn.vn_counter.quota =
2205 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2206
2207#ifdef BNX2X_PER_PROT_QOS
2208 /* per protocol counter */
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2210 /* maximal Mbps for this protocol */
2211 m_rs_vn.protocol_counters[protocol].rate =
2212 protocol_max_rate[protocol];
2213 /* the quota in each timer period -
2214 number of bytes transmitted in this period */
2215 m_rs_vn.protocol_counters[protocol].quota =
2216 (u32)(rs_periodic_timeout_usec *
2217 ((double)m_rs_vn.
2218 protocol_counters[protocol].rate/8));
2219 }
2220#endif
2221
2222 if (wsum) {
2223 /* credit for each period of the fairness algorithm:
2224 number of bytes in T_FAIR (the vn share the port rate).
2225 wsum should not be larger than 10000, thus
2226 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
2227 m_fair_vn.vn_credit_delta =
2228 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2229 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2230 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2231 m_fair_vn.vn_credit_delta);
2232 }
2233
2234#ifdef BNX2X_PER_PROT_QOS
2235 do {
2236 u32 protocolWeightSum = 0;
2237
2238 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2239 protocolWeightSum +=
2240 drvInit.protocol_min_rate[protocol];
2241 /* per protocol counter -
2242 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2243 if (protocolWeightSum > 0) {
2244 for (protocol = 0;
2245 protocol < NUM_OF_PROTOCOLS; protocol++)
2246 /* credit for each period of the
2247 fairness algorithm - number of bytes in
2248 T_FAIR (the protocol share the vn rate) */
2249 m_fair_vn.protocol_credit_delta[protocol] =
2250 (u32)((vn_min_rate / 8) * t_fair *
2251 protocol_min_rate / protocolWeightSum);
2252 }
2253 } while (0);
2254#endif
2255
2256 /* Store it to internal memory */
2257 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2258 REG_WR(bp, BAR_XSTRORM_INTMEM +
2259 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2260 ((u32 *)(&m_rs_vn))[i]);
2261
2262 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2263 REG_WR(bp, BAR_XSTRORM_INTMEM +
2264 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2265 ((u32 *)(&m_fair_vn))[i]);
2266}
2267
c18487ee
YR
2268/* This function is called upon link interrupt */
2269static void bnx2x_link_attn(struct bnx2x *bp)
2270{
34f80b04
EG
2271 int vn;
2272
bb2a0f7a
YG
2273 /* Make sure that we are synced with the current statistics */
2274 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2275
c18487ee 2276 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2277
bb2a0f7a
YG
2278 if (bp->link_vars.link_up) {
2279
2280 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2281 struct host_port_stats *pstats;
2282
2283 pstats = bnx2x_sp(bp, port_stats);
2284 /* reset old bmac stats */
2285 memset(&(pstats->mac_stx[0]), 0,
2286 sizeof(struct mac_stx));
2287 }
2288 if ((bp->state == BNX2X_STATE_OPEN) ||
2289 (bp->state == BNX2X_STATE_DISABLED))
2290 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291 }
2292
c18487ee
YR
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
34f80b04
EG
2295
2296 if (IS_E1HMF(bp)) {
2297 int func;
2298
2299 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2300 if (vn == BP_E1HVN(bp))
2301 continue;
2302
2303 func = ((vn << 1) | BP_PORT(bp));
2304
2305 /* Set the attention towards other drivers
2306 on the same port */
2307 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2308 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2309 }
2310 }
2311
2312 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2313 struct cmng_struct_per_port m_cmng_port;
2314 u32 wsum;
2315 int port = BP_PORT(bp);
2316
2317 /* Init RATE SHAPING and FAIRNESS contexts */
2318 wsum = bnx2x_calc_vn_wsum(bp);
2319 bnx2x_init_port_minmax(bp, (int)wsum,
2320 bp->link_vars.line_speed,
2321 &m_cmng_port);
2322 if (IS_E1HMF(bp))
2323 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2324 bnx2x_init_vn_minmax(bp, 2*vn + port,
2325 wsum, bp->link_vars.line_speed,
2326 &m_cmng_port);
2327 }
c18487ee 2328}
a2fbb9ea 2329
c18487ee
YR
2330static void bnx2x__link_status_update(struct bnx2x *bp)
2331{
2332 if (bp->state != BNX2X_STATE_OPEN)
2333 return;
a2fbb9ea 2334
c18487ee 2335 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2336
bb2a0f7a
YG
2337 if (bp->link_vars.link_up)
2338 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2339 else
2340 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2341
c18487ee
YR
2342 /* indicate link status */
2343 bnx2x_link_report(bp);
a2fbb9ea 2344}
a2fbb9ea 2345
34f80b04
EG
2346static void bnx2x_pmf_update(struct bnx2x *bp)
2347{
2348 int port = BP_PORT(bp);
2349 u32 val;
2350
2351 bp->port.pmf = 1;
2352 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2353
2354 /* enable nig attention */
2355 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2356 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2357 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2358
2359 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2360}
2361
c18487ee 2362/* end of Link */
a2fbb9ea
ET
2363
2364/* slow path */
2365
2366/*
2367 * General service functions
2368 */
2369
2370/* the slow path queue is odd since completions arrive on the fastpath ring */
2371static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2372 u32 data_hi, u32 data_lo, int common)
2373{
34f80b04 2374 int func = BP_FUNC(bp);
a2fbb9ea 2375
34f80b04
EG
2376 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2377 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2378 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2379 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2380 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2381
2382#ifdef BNX2X_STOP_ON_ERROR
2383 if (unlikely(bp->panic))
2384 return -EIO;
2385#endif
2386
34f80b04 2387 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2388
2389 if (!bp->spq_left) {
2390 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2391 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2392 bnx2x_panic();
2393 return -EBUSY;
2394 }
f1410647 2395
a2fbb9ea
ET
2396 /* CID needs port number to be encoded int it */
2397 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2398 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2399 HW_CID(bp, cid)));
2400 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2401 if (common)
2402 bp->spq_prod_bd->hdr.type |=
2403 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2404
2405 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2406 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2407
2408 bp->spq_left--;
2409
2410 if (bp->spq_prod_bd == bp->spq_last_bd) {
2411 bp->spq_prod_bd = bp->spq;
2412 bp->spq_prod_idx = 0;
2413 DP(NETIF_MSG_TIMER, "end of spq\n");
2414
2415 } else {
2416 bp->spq_prod_bd++;
2417 bp->spq_prod_idx++;
2418 }
2419
34f80b04 2420 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2421 bp->spq_prod_idx);
2422
34f80b04 2423 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2424 return 0;
2425}
2426
2427/* acquire split MCP access lock register */
4a37fb66 2428static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2429{
a2fbb9ea 2430 u32 i, j, val;
34f80b04 2431 int rc = 0;
a2fbb9ea
ET
2432
2433 might_sleep();
2434 i = 100;
2435 for (j = 0; j < i*10; j++) {
2436 val = (1UL << 31);
2437 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2438 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2439 if (val & (1L << 31))
2440 break;
2441
2442 msleep(5);
2443 }
a2fbb9ea 2444 if (!(val & (1L << 31))) {
19680c48 2445 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2446 rc = -EBUSY;
2447 }
2448
2449 return rc;
2450}
2451
4a37fb66
YG
2452/* release split MCP access lock register */
2453static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2454{
2455 u32 val = 0;
2456
2457 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2458}
2459
2460static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2461{
2462 struct host_def_status_block *def_sb = bp->def_status_blk;
2463 u16 rc = 0;
2464
2465 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2466 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2467 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2468 rc |= 1;
2469 }
2470 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2471 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2472 rc |= 2;
2473 }
2474 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2475 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2476 rc |= 4;
2477 }
2478 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2479 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2480 rc |= 8;
2481 }
2482 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2483 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2484 rc |= 16;
2485 }
2486 return rc;
2487}
2488
2489/*
2490 * slow path service functions
2491 */
2492
2493static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2494{
34f80b04 2495 int port = BP_PORT(bp);
5c862848
EG
2496 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2497 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2498 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2499 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2500 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2501 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2502 u32 aeu_mask;
a2fbb9ea 2503
a2fbb9ea
ET
2504 if (bp->attn_state & asserted)
2505 BNX2X_ERR("IGU ERROR\n");
2506
3fcaf2e5
EG
2507 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2508 aeu_mask = REG_RD(bp, aeu_addr);
2509
a2fbb9ea 2510 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2511 aeu_mask, asserted);
2512 aeu_mask &= ~(asserted & 0xff);
2513 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2514
3fcaf2e5
EG
2515 REG_WR(bp, aeu_addr, aeu_mask);
2516 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2517
3fcaf2e5 2518 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2519 bp->attn_state |= asserted;
3fcaf2e5 2520 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2521
2522 if (asserted & ATTN_HARD_WIRED_MASK) {
2523 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2524
a5e9a7cf
EG
2525 bnx2x_acquire_phy_lock(bp);
2526
877e9aa4
ET
2527 /* save nig interrupt mask */
2528 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2529 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2530
c18487ee 2531 bnx2x_link_attn(bp);
a2fbb9ea
ET
2532
2533 /* handle unicore attn? */
2534 }
2535 if (asserted & ATTN_SW_TIMER_4_FUNC)
2536 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2537
2538 if (asserted & GPIO_2_FUNC)
2539 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2540
2541 if (asserted & GPIO_3_FUNC)
2542 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2543
2544 if (asserted & GPIO_4_FUNC)
2545 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2546
2547 if (port == 0) {
2548 if (asserted & ATTN_GENERAL_ATTN_1) {
2549 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2550 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2551 }
2552 if (asserted & ATTN_GENERAL_ATTN_2) {
2553 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2554 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2555 }
2556 if (asserted & ATTN_GENERAL_ATTN_3) {
2557 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2558 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2559 }
2560 } else {
2561 if (asserted & ATTN_GENERAL_ATTN_4) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2564 }
2565 if (asserted & ATTN_GENERAL_ATTN_5) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2568 }
2569 if (asserted & ATTN_GENERAL_ATTN_6) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2572 }
2573 }
2574
2575 } /* if hardwired */
2576
5c862848
EG
2577 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2578 asserted, hc_addr);
2579 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2580
2581 /* now set back the mask */
a5e9a7cf 2582 if (asserted & ATTN_NIG_FOR_FUNC) {
877e9aa4 2583 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a5e9a7cf
EG
2584 bnx2x_release_phy_lock(bp);
2585 }
a2fbb9ea
ET
2586}
2587
877e9aa4 2588static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2589{
34f80b04 2590 int port = BP_PORT(bp);
877e9aa4
ET
2591 int reg_offset;
2592 u32 val;
2593
34f80b04
EG
2594 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2595 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2596
34f80b04 2597 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2598
2599 val = REG_RD(bp, reg_offset);
2600 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2601 REG_WR(bp, reg_offset, val);
2602
2603 BNX2X_ERR("SPIO5 hw attention\n");
2604
34f80b04 2605 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2606 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2607 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2608 /* Fan failure attention */
2609
17de50b7 2610 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2611 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2612 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2613 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2614 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2615 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2616 /* mark the failure */
c18487ee 2617 bp->link_params.ext_phy_config &=
877e9aa4 2618 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2619 bp->link_params.ext_phy_config |=
877e9aa4
ET
2620 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2621 SHMEM_WR(bp,
2622 dev_info.port_hw_config[port].
2623 external_phy_config,
c18487ee 2624 bp->link_params.ext_phy_config);
877e9aa4
ET
2625 /* log the failure */
2626 printk(KERN_ERR PFX "Fan Failure on Network"
2627 " Controller %s has caused the driver to"
2628 " shutdown the card to prevent permanent"
2629 " damage. Please contact Dell Support for"
2630 " assistance\n", bp->dev->name);
2631 break;
2632
2633 default:
2634 break;
2635 }
2636 }
34f80b04
EG
2637
2638 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2639
2640 val = REG_RD(bp, reg_offset);
2641 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2642 REG_WR(bp, reg_offset, val);
2643
2644 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2645 (attn & HW_INTERRUT_ASSERT_SET_0));
2646 bnx2x_panic();
2647 }
877e9aa4
ET
2648}
2649
2650static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2651{
2652 u32 val;
2653
2654 if (attn & BNX2X_DOORQ_ASSERT) {
2655
2656 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2657 BNX2X_ERR("DB hw attention 0x%x\n", val);
2658 /* DORQ discard attention */
2659 if (val & 0x2)
2660 BNX2X_ERR("FATAL error from DORQ\n");
2661 }
34f80b04
EG
2662
2663 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2664
2665 int port = BP_PORT(bp);
2666 int reg_offset;
2667
2668 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2669 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2670
2671 val = REG_RD(bp, reg_offset);
2672 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2673 REG_WR(bp, reg_offset, val);
2674
2675 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2676 (attn & HW_INTERRUT_ASSERT_SET_1));
2677 bnx2x_panic();
2678 }
877e9aa4
ET
2679}
2680
2681static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2682{
2683 u32 val;
2684
2685 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2686
2687 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2688 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2689 /* CFC error attention */
2690 if (val & 0x2)
2691 BNX2X_ERR("FATAL error from CFC\n");
2692 }
2693
2694 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2695
2696 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2697 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2698 /* RQ_USDMDP_FIFO_OVERFLOW */
2699 if (val & 0x18000)
2700 BNX2X_ERR("FATAL error from PXP\n");
2701 }
34f80b04
EG
2702
2703 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2704
2705 int port = BP_PORT(bp);
2706 int reg_offset;
2707
2708 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2709 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2710
2711 val = REG_RD(bp, reg_offset);
2712 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2713 REG_WR(bp, reg_offset, val);
2714
2715 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2716 (attn & HW_INTERRUT_ASSERT_SET_2));
2717 bnx2x_panic();
2718 }
877e9aa4
ET
2719}
2720
2721static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2722{
34f80b04
EG
2723 u32 val;
2724
877e9aa4
ET
2725 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2726
34f80b04
EG
2727 if (attn & BNX2X_PMF_LINK_ASSERT) {
2728 int func = BP_FUNC(bp);
2729
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2731 bnx2x__link_status_update(bp);
2732 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2733 DRV_STATUS_PMF)
2734 bnx2x_pmf_update(bp);
2735
2736 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2737
2738 BNX2X_ERR("MC assert!\n");
2739 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2740 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2741 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2742 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2743 bnx2x_panic();
2744
2745 } else if (attn & BNX2X_MCP_ASSERT) {
2746
2747 BNX2X_ERR("MCP assert!\n");
2748 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2749 bnx2x_fw_dump(bp);
877e9aa4
ET
2750
2751 } else
2752 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2753 }
2754
2755 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2756 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2757 if (attn & BNX2X_GRC_TIMEOUT) {
2758 val = CHIP_IS_E1H(bp) ?
2759 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2760 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2761 }
2762 if (attn & BNX2X_GRC_RSV) {
2763 val = CHIP_IS_E1H(bp) ?
2764 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2765 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2766 }
877e9aa4 2767 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2768 }
2769}
2770
2771static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2772{
a2fbb9ea
ET
2773 struct attn_route attn;
2774 struct attn_route group_mask;
34f80b04 2775 int port = BP_PORT(bp);
877e9aa4 2776 int index;
a2fbb9ea
ET
2777 u32 reg_addr;
2778 u32 val;
3fcaf2e5 2779 u32 aeu_mask;
a2fbb9ea
ET
2780
2781 /* need to take HW lock because MCP or other port might also
2782 try to handle this event */
4a37fb66 2783 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2784
2785 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2786 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2787 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2788 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2789 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2790 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2791
2792 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2793 if (deasserted & (1 << index)) {
2794 group_mask = bp->attn_group[index];
2795
34f80b04
EG
2796 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2797 index, group_mask.sig[0], group_mask.sig[1],
2798 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2799
877e9aa4
ET
2800 bnx2x_attn_int_deasserted3(bp,
2801 attn.sig[3] & group_mask.sig[3]);
2802 bnx2x_attn_int_deasserted1(bp,
2803 attn.sig[1] & group_mask.sig[1]);
2804 bnx2x_attn_int_deasserted2(bp,
2805 attn.sig[2] & group_mask.sig[2]);
2806 bnx2x_attn_int_deasserted0(bp,
2807 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2808
a2fbb9ea
ET
2809 if ((attn.sig[0] & group_mask.sig[0] &
2810 HW_PRTY_ASSERT_SET_0) ||
2811 (attn.sig[1] & group_mask.sig[1] &
2812 HW_PRTY_ASSERT_SET_1) ||
2813 (attn.sig[2] & group_mask.sig[2] &
2814 HW_PRTY_ASSERT_SET_2))
6378c025 2815 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2816 }
2817 }
2818
4a37fb66 2819 bnx2x_release_alr(bp);
a2fbb9ea 2820
5c862848 2821 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2822
2823 val = ~deasserted;
3fcaf2e5
EG
2824 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2825 val, reg_addr);
5c862848 2826 REG_WR(bp, reg_addr, val);
a2fbb9ea 2827
a2fbb9ea 2828 if (~bp->attn_state & deasserted)
3fcaf2e5 2829 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2830
2831 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2832 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2833
3fcaf2e5
EG
2834 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2835 aeu_mask = REG_RD(bp, reg_addr);
2836
2837 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2838 aeu_mask, deasserted);
2839 aeu_mask |= (deasserted & 0xff);
2840 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2841
3fcaf2e5
EG
2842 REG_WR(bp, reg_addr, aeu_mask);
2843 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2844
2845 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2846 bp->attn_state &= ~deasserted;
2847 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2848}
2849
2850static void bnx2x_attn_int(struct bnx2x *bp)
2851{
2852 /* read local copy of bits */
68d59484
EG
2853 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2854 attn_bits);
2855 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2856 attn_bits_ack);
a2fbb9ea
ET
2857 u32 attn_state = bp->attn_state;
2858
2859 /* look for changed bits */
2860 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2861 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2862
2863 DP(NETIF_MSG_HW,
2864 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2865 attn_bits, attn_ack, asserted, deasserted);
2866
2867 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2868 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2869
2870 /* handle bits that were raised */
2871 if (asserted)
2872 bnx2x_attn_int_asserted(bp, asserted);
2873
2874 if (deasserted)
2875 bnx2x_attn_int_deasserted(bp, deasserted);
2876}
2877
2878static void bnx2x_sp_task(struct work_struct *work)
2879{
1cf167f2 2880 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2881 u16 status;
2882
34f80b04 2883
a2fbb9ea
ET
2884 /* Return here if interrupt is disabled */
2885 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2886 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2887 return;
2888 }
2889
2890 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2891/* if (status == 0) */
2892/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2893
3196a88a 2894 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2895
877e9aa4
ET
2896 /* HW attentions */
2897 if (status & 0x1)
a2fbb9ea 2898 bnx2x_attn_int(bp);
a2fbb9ea 2899
bb2a0f7a
YG
2900 /* CStorm events: query_stats, port delete ramrod */
2901 if (status & 0x2)
2902 bp->stats_pending = 0;
2903
68d59484 2904 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2905 IGU_INT_NOP, 1);
2906 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2907 IGU_INT_NOP, 1);
2908 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2909 IGU_INT_NOP, 1);
2910 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2911 IGU_INT_NOP, 1);
2912 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2913 IGU_INT_ENABLE, 1);
877e9aa4 2914
a2fbb9ea
ET
2915}
2916
2917static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2918{
2919 struct net_device *dev = dev_instance;
2920 struct bnx2x *bp = netdev_priv(dev);
2921
2922 /* Return here if interrupt is disabled */
2923 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2924 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2925 return IRQ_HANDLED;
2926 }
2927
8d9c5f34 2928 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2929
2930#ifdef BNX2X_STOP_ON_ERROR
2931 if (unlikely(bp->panic))
2932 return IRQ_HANDLED;
2933#endif
2934
1cf167f2 2935 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2936
2937 return IRQ_HANDLED;
2938}
2939
2940/* end of slow path */
2941
2942/* Statistics */
2943
2944/****************************************************************************
2945* Macros
2946****************************************************************************/
2947
a2fbb9ea
ET
2948/* sum[hi:lo] += add[hi:lo] */
2949#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2950 do { \
2951 s_lo += a_lo; \
f5ba6772 2952 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2953 } while (0)
2954
2955/* difference = minuend - subtrahend */
2956#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2957 do { \
bb2a0f7a
YG
2958 if (m_lo < s_lo) { \
2959 /* underflow */ \
a2fbb9ea 2960 d_hi = m_hi - s_hi; \
bb2a0f7a 2961 if (d_hi > 0) { \
6378c025 2962 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2963 d_hi--; \
2964 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2965 } else { \
6378c025 2966 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2967 d_hi = 0; \
2968 d_lo = 0; \
2969 } \
bb2a0f7a
YG
2970 } else { \
2971 /* m_lo >= s_lo */ \
a2fbb9ea 2972 if (m_hi < s_hi) { \
bb2a0f7a
YG
2973 d_hi = 0; \
2974 d_lo = 0; \
2975 } else { \
6378c025 2976 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2977 d_hi = m_hi - s_hi; \
2978 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2979 } \
2980 } \
2981 } while (0)
2982
bb2a0f7a 2983#define UPDATE_STAT64(s, t) \
a2fbb9ea 2984 do { \
bb2a0f7a
YG
2985 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2986 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2987 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2988 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2989 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2990 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2991 } while (0)
2992
bb2a0f7a 2993#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2994 do { \
bb2a0f7a
YG
2995 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2996 diff.lo, new->s##_lo, old->s##_lo); \
2997 ADD_64(estats->t##_hi, diff.hi, \
2998 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2999 } while (0)
3000
3001/* sum[hi:lo] += add */
3002#define ADD_EXTEND_64(s_hi, s_lo, a) \
3003 do { \
3004 s_lo += a; \
3005 s_hi += (s_lo < a) ? 1 : 0; \
3006 } while (0)
3007
bb2a0f7a 3008#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3009 do { \
bb2a0f7a
YG
3010 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3011 pstats->mac_stx[1].s##_lo, \
3012 new->s); \
a2fbb9ea
ET
3013 } while (0)
3014
bb2a0f7a 3015#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
3016 do { \
3017 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3018 old_tclient->s = le32_to_cpu(tclient->s); \
bb2a0f7a
YG
3019 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3020 } while (0)
3021
3022#define UPDATE_EXTEND_XSTAT(s, t) \
3023 do { \
3024 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3025 old_xclient->s = le32_to_cpu(xclient->s); \
3026 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
a2fbb9ea
ET
3027 } while (0)
3028
3029/*
3030 * General service functions
3031 */
3032
3033static inline long bnx2x_hilo(u32 *hiref)
3034{
3035 u32 lo = *(hiref + 1);
3036#if (BITS_PER_LONG == 64)
3037 u32 hi = *hiref;
3038
3039 return HILO_U64(hi, lo);
3040#else
3041 return lo;
3042#endif
3043}
3044
3045/*
3046 * Init service functions
3047 */
3048
bb2a0f7a
YG
3049static void bnx2x_storm_stats_post(struct bnx2x *bp)
3050{
3051 if (!bp->stats_pending) {
3052 struct eth_query_ramrod_data ramrod_data = {0};
3053 int rc;
3054
3055 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3056 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
bb2a0f7a
YG
3057 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3058
3059 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3060 ((u32 *)&ramrod_data)[1],
3061 ((u32 *)&ramrod_data)[0], 0);
3062 if (rc == 0) {
3063 /* stats ramrod has it's own slot on the spq */
3064 bp->spq_left++;
3065 bp->stats_pending = 1;
3066 }
3067 }
3068}
3069
3070static void bnx2x_stats_init(struct bnx2x *bp)
3071{
3072 int port = BP_PORT(bp);
3073
3074 bp->executer_idx = 0;
3075 bp->stats_counter = 0;
3076
3077 /* port stats */
3078 if (!BP_NOMCP(bp))
3079 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3080 else
3081 bp->port.port_stx = 0;
3082 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3083
3084 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3085 bp->port.old_nig_stats.brb_discard =
3086 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3087 bp->port.old_nig_stats.brb_truncate =
3088 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3089 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3090 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3091 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3092 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3093
3094 /* function stats */
3095 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3096 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3097 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3098 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3099
3100 bp->stats_state = STATS_STATE_DISABLED;
3101 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3102 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3103}
3104
3105static void bnx2x_hw_stats_post(struct bnx2x *bp)
3106{
3107 struct dmae_command *dmae = &bp->stats_dmae;
3108 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3109
3110 *stats_comp = DMAE_COMP_VAL;
3111
3112 /* loader */
3113 if (bp->executer_idx) {
3114 int loader_idx = PMF_DMAE_C(bp);
3115
3116 memset(dmae, 0, sizeof(struct dmae_command));
3117
3118 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3119 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3120 DMAE_CMD_DST_RESET |
3121#ifdef __BIG_ENDIAN
3122 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3123#else
3124 DMAE_CMD_ENDIANITY_DW_SWAP |
3125#endif
3126 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3127 DMAE_CMD_PORT_0) |
3128 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3129 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3130 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3131 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3132 sizeof(struct dmae_command) *
3133 (loader_idx + 1)) >> 2;
3134 dmae->dst_addr_hi = 0;
3135 dmae->len = sizeof(struct dmae_command) >> 2;
3136 if (CHIP_IS_E1(bp))
3137 dmae->len--;
3138 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3139 dmae->comp_addr_hi = 0;
3140 dmae->comp_val = 1;
3141
3142 *stats_comp = 0;
3143 bnx2x_post_dmae(bp, dmae, loader_idx);
3144
3145 } else if (bp->func_stx) {
3146 *stats_comp = 0;
3147 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3148 }
3149}
3150
3151static int bnx2x_stats_comp(struct bnx2x *bp)
3152{
3153 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3154 int cnt = 10;
3155
3156 might_sleep();
3157 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3158 if (!cnt) {
3159 BNX2X_ERR("timeout waiting for stats finished\n");
3160 break;
3161 }
3162 cnt--;
12469401 3163 msleep(1);
bb2a0f7a
YG
3164 }
3165 return 1;
3166}
3167
3168/*
3169 * Statistics service functions
3170 */
3171
3172static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3173{
3174 struct dmae_command *dmae;
3175 u32 opcode;
3176 int loader_idx = PMF_DMAE_C(bp);
3177 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3178
3179 /* sanity */
3180 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3181 BNX2X_ERR("BUG!\n");
3182 return;
3183 }
3184
3185 bp->executer_idx = 0;
3186
3187 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3188 DMAE_CMD_C_ENABLE |
3189 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3190#ifdef __BIG_ENDIAN
3191 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3192#else
3193 DMAE_CMD_ENDIANITY_DW_SWAP |
3194#endif
3195 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3196 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3197
3198 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3199 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3200 dmae->src_addr_lo = bp->port.port_stx >> 2;
3201 dmae->src_addr_hi = 0;
3202 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3203 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3204 dmae->len = DMAE_LEN32_RD_MAX;
3205 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3206 dmae->comp_addr_hi = 0;
3207 dmae->comp_val = 1;
3208
3209 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3210 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3211 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3212 dmae->src_addr_hi = 0;
7a9b2557
VZ
3213 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3214 DMAE_LEN32_RD_MAX * 4);
3215 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3216 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3217 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3218 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3219 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3220 dmae->comp_val = DMAE_COMP_VAL;
3221
3222 *stats_comp = 0;
3223 bnx2x_hw_stats_post(bp);
3224 bnx2x_stats_comp(bp);
3225}
3226
3227static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3228{
3229 struct dmae_command *dmae;
34f80b04 3230 int port = BP_PORT(bp);
bb2a0f7a 3231 int vn = BP_E1HVN(bp);
a2fbb9ea 3232 u32 opcode;
bb2a0f7a 3233 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3234 u32 mac_addr;
bb2a0f7a
YG
3235 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3236
3237 /* sanity */
3238 if (!bp->link_vars.link_up || !bp->port.pmf) {
3239 BNX2X_ERR("BUG!\n");
3240 return;
3241 }
a2fbb9ea
ET
3242
3243 bp->executer_idx = 0;
bb2a0f7a
YG
3244
3245 /* MCP */
3246 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3247 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3248 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3249#ifdef __BIG_ENDIAN
bb2a0f7a 3250 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3251#else
bb2a0f7a 3252 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3253#endif
bb2a0f7a
YG
3254 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3255 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3256
bb2a0f7a 3257 if (bp->port.port_stx) {
a2fbb9ea
ET
3258
3259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3260 dmae->opcode = opcode;
bb2a0f7a
YG
3261 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3262 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3263 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3264 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3265 dmae->len = sizeof(struct host_port_stats) >> 2;
3266 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3267 dmae->comp_addr_hi = 0;
3268 dmae->comp_val = 1;
a2fbb9ea
ET
3269 }
3270
bb2a0f7a
YG
3271 if (bp->func_stx) {
3272
3273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274 dmae->opcode = opcode;
3275 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3276 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3277 dmae->dst_addr_lo = bp->func_stx >> 2;
3278 dmae->dst_addr_hi = 0;
3279 dmae->len = sizeof(struct host_func_stats) >> 2;
3280 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3281 dmae->comp_addr_hi = 0;
3282 dmae->comp_val = 1;
a2fbb9ea
ET
3283 }
3284
bb2a0f7a 3285 /* MAC */
a2fbb9ea
ET
3286 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3287 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3288 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3289#ifdef __BIG_ENDIAN
3290 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3291#else
3292 DMAE_CMD_ENDIANITY_DW_SWAP |
3293#endif
bb2a0f7a
YG
3294 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3295 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3296
c18487ee 3297 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3298
3299 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3300 NIG_REG_INGRESS_BMAC0_MEM);
3301
3302 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3303 BIGMAC_REGISTER_TX_STAT_GTBYT */
3304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305 dmae->opcode = opcode;
3306 dmae->src_addr_lo = (mac_addr +
3307 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3308 dmae->src_addr_hi = 0;
3309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3310 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3311 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3312 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3313 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314 dmae->comp_addr_hi = 0;
3315 dmae->comp_val = 1;
3316
3317 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3318 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320 dmae->opcode = opcode;
3321 dmae->src_addr_lo = (mac_addr +
3322 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3323 dmae->src_addr_hi = 0;
3324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3325 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3326 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3327 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3328 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3329 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3330 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3331 dmae->comp_addr_hi = 0;
3332 dmae->comp_val = 1;
3333
c18487ee 3334 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3335
3336 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3337
3338 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (mac_addr +
3342 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3346 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3347 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3348 dmae->comp_addr_hi = 0;
3349 dmae->comp_val = 1;
3350
3351 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353 dmae->opcode = opcode;
3354 dmae->src_addr_lo = (mac_addr +
3355 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3356 dmae->src_addr_hi = 0;
3357 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3358 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3360 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3361 dmae->len = 1;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3364 dmae->comp_val = 1;
3365
3366 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3373 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3375 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3376 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3377 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378 dmae->comp_addr_hi = 0;
3379 dmae->comp_val = 1;
3380 }
3381
3382 /* NIG */
bb2a0f7a
YG
3383 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3384 dmae->opcode = opcode;
3385 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3386 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3387 dmae->src_addr_hi = 0;
3388 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3389 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3390 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3391 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392 dmae->comp_addr_hi = 0;
3393 dmae->comp_val = 1;
3394
3395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3396 dmae->opcode = opcode;
3397 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3398 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3399 dmae->src_addr_hi = 0;
3400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3401 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3403 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3404 dmae->len = (2*sizeof(u32)) >> 2;
3405 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3406 dmae->comp_addr_hi = 0;
3407 dmae->comp_val = 1;
3408
a2fbb9ea
ET
3409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3411 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3412 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3413#ifdef __BIG_ENDIAN
3414 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3415#else
3416 DMAE_CMD_ENDIANITY_DW_SWAP |
3417#endif
bb2a0f7a
YG
3418 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3419 (vn << DMAE_CMD_E1HVN_SHIFT));
3420 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3421 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3422 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3423 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3424 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3425 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3426 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3427 dmae->len = (2*sizeof(u32)) >> 2;
3428 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3429 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3430 dmae->comp_val = DMAE_COMP_VAL;
3431
3432 *stats_comp = 0;
a2fbb9ea
ET
3433}
3434
bb2a0f7a 3435static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3436{
bb2a0f7a
YG
3437 struct dmae_command *dmae = &bp->stats_dmae;
3438 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3439
bb2a0f7a
YG
3440 /* sanity */
3441 if (!bp->func_stx) {
3442 BNX2X_ERR("BUG!\n");
3443 return;
3444 }
a2fbb9ea 3445
bb2a0f7a
YG
3446 bp->executer_idx = 0;
3447 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3448
bb2a0f7a
YG
3449 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3450 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3451 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3452#ifdef __BIG_ENDIAN
3453 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3454#else
3455 DMAE_CMD_ENDIANITY_DW_SWAP |
3456#endif
3457 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3458 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3459 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3460 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3461 dmae->dst_addr_lo = bp->func_stx >> 2;
3462 dmae->dst_addr_hi = 0;
3463 dmae->len = sizeof(struct host_func_stats) >> 2;
3464 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3465 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3466 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3467
bb2a0f7a
YG
3468 *stats_comp = 0;
3469}
a2fbb9ea 3470
bb2a0f7a
YG
3471static void bnx2x_stats_start(struct bnx2x *bp)
3472{
3473 if (bp->port.pmf)
3474 bnx2x_port_stats_init(bp);
3475
3476 else if (bp->func_stx)
3477 bnx2x_func_stats_init(bp);
3478
3479 bnx2x_hw_stats_post(bp);
3480 bnx2x_storm_stats_post(bp);
3481}
3482
3483static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3484{
3485 bnx2x_stats_comp(bp);
3486 bnx2x_stats_pmf_update(bp);
3487 bnx2x_stats_start(bp);
3488}
3489
3490static void bnx2x_stats_restart(struct bnx2x *bp)
3491{
3492 bnx2x_stats_comp(bp);
3493 bnx2x_stats_start(bp);
3494}
3495
3496static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3497{
3498 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3499 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3500 struct regpair diff;
3501
3502 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3503 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3504 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3505 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3506 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3507 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3508 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a
YG
3509 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3510 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3511 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3512 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3513 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3514 UPDATE_STAT64(tx_stat_gt127,
3515 tx_stat_etherstatspkts65octetsto127octets);
3516 UPDATE_STAT64(tx_stat_gt255,
3517 tx_stat_etherstatspkts128octetsto255octets);
3518 UPDATE_STAT64(tx_stat_gt511,
3519 tx_stat_etherstatspkts256octetsto511octets);
3520 UPDATE_STAT64(tx_stat_gt1023,
3521 tx_stat_etherstatspkts512octetsto1023octets);
3522 UPDATE_STAT64(tx_stat_gt1518,
3523 tx_stat_etherstatspkts1024octetsto1522octets);
3524 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3525 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3526 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3527 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3528 UPDATE_STAT64(tx_stat_gterr,
3529 tx_stat_dot3statsinternalmactransmiterrors);
3530 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3531}
3532
3533static void bnx2x_emac_stats_update(struct bnx2x *bp)
3534{
3535 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3536 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3537
3538 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3539 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3540 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3541 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3542 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3543 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3544 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3545 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3546 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3547 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3548 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3549 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3550 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3551 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3552 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3553 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3554 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3555 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3556 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3557 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3558 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3559 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3560 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3561 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3562 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3563 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3564 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3565 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3566 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3567 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3568 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3569}
3570
3571static int bnx2x_hw_stats_update(struct bnx2x *bp)
3572{
3573 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3574 struct nig_stats *old = &(bp->port.old_nig_stats);
3575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3576 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3577 struct regpair diff;
3578
3579 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3580 bnx2x_bmac_stats_update(bp);
3581
3582 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3583 bnx2x_emac_stats_update(bp);
3584
3585 else { /* unreached */
3586 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3587 return -1;
3588 }
a2fbb9ea 3589
bb2a0f7a
YG
3590 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3591 new->brb_discard - old->brb_discard);
66e855f3
YG
3592 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3593 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3594
bb2a0f7a
YG
3595 UPDATE_STAT64_NIG(egress_mac_pkt0,
3596 etherstatspkts1024octetsto1522octets);
3597 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3598
bb2a0f7a 3599 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3600
bb2a0f7a
YG
3601 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3602 sizeof(struct mac_stx));
3603 estats->brb_drop_hi = pstats->brb_drop_hi;
3604 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3605
bb2a0f7a 3606 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3607
bb2a0f7a 3608 return 0;
a2fbb9ea
ET
3609}
3610
bb2a0f7a 3611static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3612{
3613 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a
YG
3614 int cl_id = BP_CL_ID(bp);
3615 struct tstorm_per_port_stats *tport =
3616 &stats->tstorm_common.port_statistics;
a2fbb9ea 3617 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3618 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3619 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
bb2a0f7a
YG
3620 struct xstorm_per_client_stats *xclient =
3621 &stats->xstorm_common.client_statistics[cl_id];
3622 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3623 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3624 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3625 u32 diff;
3626
bb2a0f7a
YG
3627 /* are storm stats valid? */
3628 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3629 bp->stats_counter) {
3630 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3631 " tstorm counter (%d) != stats_counter (%d)\n",
3632 tclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3633 return -1;
3634 }
bb2a0f7a
YG
3635 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3636 bp->stats_counter) {
3637 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3638 " xstorm counter (%d) != stats_counter (%d)\n",
3639 xclient->stats_counter, bp->stats_counter);
a2fbb9ea
ET
3640 return -2;
3641 }
a2fbb9ea 3642
bb2a0f7a
YG
3643 fstats->total_bytes_received_hi =
3644 fstats->valid_bytes_received_hi =
a2fbb9ea 3645 le32_to_cpu(tclient->total_rcv_bytes.hi);
bb2a0f7a
YG
3646 fstats->total_bytes_received_lo =
3647 fstats->valid_bytes_received_lo =
a2fbb9ea 3648 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a
YG
3649
3650 estats->error_bytes_received_hi =
3651 le32_to_cpu(tclient->rcv_error_bytes.hi);
3652 estats->error_bytes_received_lo =
3653 le32_to_cpu(tclient->rcv_error_bytes.lo);
3654 ADD_64(estats->error_bytes_received_hi,
3655 estats->rx_stat_ifhcinbadoctets_hi,
3656 estats->error_bytes_received_lo,
3657 estats->rx_stat_ifhcinbadoctets_lo);
3658
3659 ADD_64(fstats->total_bytes_received_hi,
3660 estats->error_bytes_received_hi,
3661 fstats->total_bytes_received_lo,
3662 estats->error_bytes_received_lo);
3663
3664 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3665 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3666 total_multicast_packets_received);
a2fbb9ea 3667 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
bb2a0f7a
YG
3668 total_broadcast_packets_received);
3669
3670 fstats->total_bytes_transmitted_hi =
3671 le32_to_cpu(xclient->total_sent_bytes.hi);
3672 fstats->total_bytes_transmitted_lo =
3673 le32_to_cpu(xclient->total_sent_bytes.lo);
3674
3675 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3676 total_unicast_packets_transmitted);
3677 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3678 total_multicast_packets_transmitted);
3679 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3680 total_broadcast_packets_transmitted);
3681
3682 memcpy(estats, &(fstats->total_bytes_received_hi),
3683 sizeof(struct host_func_stats) - 2*sizeof(u32));
3684
3685 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3686 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3687 estats->brb_truncate_discard =
3688 le32_to_cpu(tport->brb_truncate_discard);
3689 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3690
3691 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3692 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3693 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3694 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3695 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3696 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3697 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3698 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3699 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3700 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3701 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3702 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3703 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3704
bb2a0f7a
YG
3705 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3706 old_tclient->packets_too_big_discard =
a2fbb9ea 3707 le32_to_cpu(tclient->packets_too_big_discard);
bb2a0f7a
YG
3708 estats->no_buff_discard =
3709 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3710 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3711
3712 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3713 old_xclient->unicast_bytes_sent.hi =
3714 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3715 old_xclient->unicast_bytes_sent.lo =
3716 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3717 old_xclient->multicast_bytes_sent.hi =
3718 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3719 old_xclient->multicast_bytes_sent.lo =
3720 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3721 old_xclient->broadcast_bytes_sent.hi =
3722 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3723 old_xclient->broadcast_bytes_sent.lo =
3724 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3725
3726 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea
ET
3727
3728 return 0;
3729}
3730
bb2a0f7a 3731static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3732{
bb2a0f7a
YG
3733 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3734 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea
ET
3735 struct net_device_stats *nstats = &bp->dev->stats;
3736
3737 nstats->rx_packets =
3738 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3739 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3740 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3741
3742 nstats->tx_packets =
3743 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3744 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3745 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3746
bb2a0f7a 3747 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3748
0e39e645 3749 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3750
bb2a0f7a
YG
3751 nstats->rx_dropped = old_tclient->checksum_discard +
3752 estats->mac_discard;
a2fbb9ea
ET
3753 nstats->tx_dropped = 0;
3754
3755 nstats->multicast =
3756 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3757
bb2a0f7a
YG
3758 nstats->collisions =
3759 estats->tx_stat_dot3statssinglecollisionframes_lo +
3760 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3761 estats->tx_stat_dot3statslatecollisions_lo +
3762 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3763
bb2a0f7a
YG
3764 estats->jabber_packets_received =
3765 old_tclient->packets_too_big_discard +
3766 estats->rx_stat_dot3statsframestoolong_lo;
3767
3768 nstats->rx_length_errors =
3769 estats->rx_stat_etherstatsundersizepkts_lo +
3770 estats->jabber_packets_received;
66e855f3 3771 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
bb2a0f7a
YG
3772 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3773 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3774 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
a2fbb9ea
ET
3775 nstats->rx_missed_errors = estats->xxoverflow_discard;
3776
3777 nstats->rx_errors = nstats->rx_length_errors +
3778 nstats->rx_over_errors +
3779 nstats->rx_crc_errors +
3780 nstats->rx_frame_errors +
0e39e645
ET
3781 nstats->rx_fifo_errors +
3782 nstats->rx_missed_errors;
a2fbb9ea 3783
bb2a0f7a
YG
3784 nstats->tx_aborted_errors =
3785 estats->tx_stat_dot3statslatecollisions_lo +
3786 estats->tx_stat_dot3statsexcessivecollisions_lo;
3787 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
a2fbb9ea
ET
3788 nstats->tx_fifo_errors = 0;
3789 nstats->tx_heartbeat_errors = 0;
3790 nstats->tx_window_errors = 0;
3791
3792 nstats->tx_errors = nstats->tx_aborted_errors +
3793 nstats->tx_carrier_errors;
a2fbb9ea
ET
3794}
3795
bb2a0f7a 3796static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3797{
bb2a0f7a
YG
3798 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3799 int update = 0;
a2fbb9ea 3800
bb2a0f7a
YG
3801 if (*stats_comp != DMAE_COMP_VAL)
3802 return;
3803
3804 if (bp->port.pmf)
3805 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3806
bb2a0f7a 3807 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3808
bb2a0f7a
YG
3809 if (update)
3810 bnx2x_net_stats_update(bp);
a2fbb9ea 3811
bb2a0f7a
YG
3812 else {
3813 if (bp->stats_pending) {
3814 bp->stats_pending++;
3815 if (bp->stats_pending == 3) {
3816 BNX2X_ERR("stats not updated for 3 times\n");
3817 bnx2x_panic();
3818 return;
3819 }
3820 }
a2fbb9ea
ET
3821 }
3822
3823 if (bp->msglevel & NETIF_MSG_TIMER) {
bb2a0f7a
YG
3824 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3825 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3826 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3827 int i;
a2fbb9ea
ET
3828
3829 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3830 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3831 " tx pkt (%lx)\n",
3832 bnx2x_tx_avail(bp->fp),
7a9b2557 3833 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3834 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3835 " rx pkt (%lx)\n",
7a9b2557
VZ
3836 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3837 bp->fp->rx_comp_cons),
3838 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3839 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3840 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3841 estats->driver_xoff, estats->brb_drop_lo);
a2fbb9ea
ET
3842 printk(KERN_DEBUG "tstats: checksum_discard %u "
3843 "packets_too_big_discard %u no_buff_discard %u "
3844 "mac_discard %u mac_filter_discard %u "
3845 "xxovrflow_discard %u brb_truncate_discard %u "
3846 "ttl0_discard %u\n",
bb2a0f7a
YG
3847 old_tclient->checksum_discard,
3848 old_tclient->packets_too_big_discard,
3849 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3850 estats->mac_filter_discard, estats->xxoverflow_discard,
bb2a0f7a
YG
3851 estats->brb_truncate_discard,
3852 old_tclient->ttl0_discard);
a2fbb9ea
ET
3853
3854 for_each_queue(bp, i) {
3855 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3856 bnx2x_fp(bp, i, tx_pkt),
3857 bnx2x_fp(bp, i, rx_pkt),
3858 bnx2x_fp(bp, i, rx_calls));
3859 }
3860 }
3861
bb2a0f7a
YG
3862 bnx2x_hw_stats_post(bp);
3863 bnx2x_storm_stats_post(bp);
3864}
a2fbb9ea 3865
bb2a0f7a
YG
3866static void bnx2x_port_stats_stop(struct bnx2x *bp)
3867{
3868 struct dmae_command *dmae;
3869 u32 opcode;
3870 int loader_idx = PMF_DMAE_C(bp);
3871 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3872
bb2a0f7a 3873 bp->executer_idx = 0;
a2fbb9ea 3874
bb2a0f7a
YG
3875 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3876 DMAE_CMD_C_ENABLE |
3877 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3878#ifdef __BIG_ENDIAN
bb2a0f7a 3879 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3880#else
bb2a0f7a 3881 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3882#endif
bb2a0f7a
YG
3883 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3884 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3885
3886 if (bp->port.port_stx) {
3887
3888 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3889 if (bp->func_stx)
3890 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3891 else
3892 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3893 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3894 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3895 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3896 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3897 dmae->len = sizeof(struct host_port_stats) >> 2;
3898 if (bp->func_stx) {
3899 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3900 dmae->comp_addr_hi = 0;
3901 dmae->comp_val = 1;
3902 } else {
3903 dmae->comp_addr_lo =
3904 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905 dmae->comp_addr_hi =
3906 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3907 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3908
bb2a0f7a
YG
3909 *stats_comp = 0;
3910 }
a2fbb9ea
ET
3911 }
3912
bb2a0f7a
YG
3913 if (bp->func_stx) {
3914
3915 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3916 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3917 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3918 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3919 dmae->dst_addr_lo = bp->func_stx >> 2;
3920 dmae->dst_addr_hi = 0;
3921 dmae->len = sizeof(struct host_func_stats) >> 2;
3922 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3923 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3924 dmae->comp_val = DMAE_COMP_VAL;
3925
3926 *stats_comp = 0;
a2fbb9ea 3927 }
bb2a0f7a
YG
3928}
3929
3930static void bnx2x_stats_stop(struct bnx2x *bp)
3931{
3932 int update = 0;
3933
3934 bnx2x_stats_comp(bp);
3935
3936 if (bp->port.pmf)
3937 update = (bnx2x_hw_stats_update(bp) == 0);
3938
3939 update |= (bnx2x_storm_stats_update(bp) == 0);
3940
3941 if (update) {
3942 bnx2x_net_stats_update(bp);
a2fbb9ea 3943
bb2a0f7a
YG
3944 if (bp->port.pmf)
3945 bnx2x_port_stats_stop(bp);
3946
3947 bnx2x_hw_stats_post(bp);
3948 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3949 }
3950}
3951
bb2a0f7a
YG
3952static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3953{
3954}
3955
3956static const struct {
3957 void (*action)(struct bnx2x *bp);
3958 enum bnx2x_stats_state next_state;
3959} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3960/* state event */
3961{
3962/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3963/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3964/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3965/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3966},
3967{
3968/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3969/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3970/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3971/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3972}
3973};
3974
3975static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3976{
3977 enum bnx2x_stats_state state = bp->stats_state;
3978
3979 bnx2x_stats_stm[state][event].action(bp);
3980 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3981
3982 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3983 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3984 state, event, bp->stats_state);
3985}
3986
a2fbb9ea
ET
3987static void bnx2x_timer(unsigned long data)
3988{
3989 struct bnx2x *bp = (struct bnx2x *) data;
3990
3991 if (!netif_running(bp->dev))
3992 return;
3993
3994 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3995 goto timer_restart;
a2fbb9ea
ET
3996
3997 if (poll) {
3998 struct bnx2x_fastpath *fp = &bp->fp[0];
3999 int rc;
4000
4001 bnx2x_tx_int(fp, 1000);
4002 rc = bnx2x_rx_int(fp, 1000);
4003 }
4004
34f80b04
EG
4005 if (!BP_NOMCP(bp)) {
4006 int func = BP_FUNC(bp);
a2fbb9ea
ET
4007 u32 drv_pulse;
4008 u32 mcp_pulse;
4009
4010 ++bp->fw_drv_pulse_wr_seq;
4011 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4012 /* TBD - add SYSTEM_TIME */
4013 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4014 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4015
34f80b04 4016 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4017 MCP_PULSE_SEQ_MASK);
4018 /* The delta between driver pulse and mcp response
4019 * should be 1 (before mcp response) or 0 (after mcp response)
4020 */
4021 if ((drv_pulse != mcp_pulse) &&
4022 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4023 /* someone lost a heartbeat... */
4024 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4025 drv_pulse, mcp_pulse);
4026 }
4027 }
4028
bb2a0f7a
YG
4029 if ((bp->state == BNX2X_STATE_OPEN) ||
4030 (bp->state == BNX2X_STATE_DISABLED))
4031 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4032
f1410647 4033timer_restart:
a2fbb9ea
ET
4034 mod_timer(&bp->timer, jiffies + bp->current_interval);
4035}
4036
4037/* end of Statistics */
4038
4039/* nic init */
4040
4041/*
4042 * nic init service functions
4043 */
4044
34f80b04 4045static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4046{
34f80b04
EG
4047 int port = BP_PORT(bp);
4048
4049 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4050 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4051 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4052 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4053 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4054 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4055}
4056
5c862848
EG
4057static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4058 dma_addr_t mapping, int sb_id)
34f80b04
EG
4059{
4060 int port = BP_PORT(bp);
bb2a0f7a 4061 int func = BP_FUNC(bp);
a2fbb9ea 4062 int index;
34f80b04 4063 u64 section;
a2fbb9ea
ET
4064
4065 /* USTORM */
4066 section = ((u64)mapping) + offsetof(struct host_status_block,
4067 u_status_block);
34f80b04 4068 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4069
4070 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4071 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4072 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4073 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4074 U64_HI(section));
bb2a0f7a
YG
4075 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4076 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4077
4078 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4079 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4080 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4081
4082 /* CSTORM */
4083 section = ((u64)mapping) + offsetof(struct host_status_block,
4084 c_status_block);
34f80b04 4085 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4086
4087 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4088 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4089 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4090 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4091 U64_HI(section));
7a9b2557
VZ
4092 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4093 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4094
4095 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4096 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4097 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4098
4099 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4100}
4101
4102static void bnx2x_zero_def_sb(struct bnx2x *bp)
4103{
4104 int func = BP_FUNC(bp);
a2fbb9ea 4105
34f80b04
EG
4106 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4107 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4108 sizeof(struct ustorm_def_status_block)/4);
4109 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4110 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4111 sizeof(struct cstorm_def_status_block)/4);
4112 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4113 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4114 sizeof(struct xstorm_def_status_block)/4);
4115 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4116 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4117 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4118}
4119
4120static void bnx2x_init_def_sb(struct bnx2x *bp,
4121 struct host_def_status_block *def_sb,
34f80b04 4122 dma_addr_t mapping, int sb_id)
a2fbb9ea 4123{
34f80b04
EG
4124 int port = BP_PORT(bp);
4125 int func = BP_FUNC(bp);
a2fbb9ea
ET
4126 int index, val, reg_offset;
4127 u64 section;
4128
4129 /* ATTN */
4130 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4131 atten_status_block);
34f80b04 4132 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4133
49d66772
ET
4134 bp->attn_state = 0;
4135
a2fbb9ea
ET
4136 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4137 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4138
34f80b04 4139 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4140 bp->attn_group[index].sig[0] = REG_RD(bp,
4141 reg_offset + 0x10*index);
4142 bp->attn_group[index].sig[1] = REG_RD(bp,
4143 reg_offset + 0x4 + 0x10*index);
4144 bp->attn_group[index].sig[2] = REG_RD(bp,
4145 reg_offset + 0x8 + 0x10*index);
4146 bp->attn_group[index].sig[3] = REG_RD(bp,
4147 reg_offset + 0xc + 0x10*index);
4148 }
4149
a2fbb9ea
ET
4150 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4151 HC_REG_ATTN_MSG0_ADDR_L);
4152
4153 REG_WR(bp, reg_offset, U64_LO(section));
4154 REG_WR(bp, reg_offset + 4, U64_HI(section));
4155
4156 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4157
4158 val = REG_RD(bp, reg_offset);
34f80b04 4159 val |= sb_id;
a2fbb9ea
ET
4160 REG_WR(bp, reg_offset, val);
4161
4162 /* USTORM */
4163 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4164 u_def_status_block);
34f80b04 4165 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4166
4167 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4168 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4169 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4170 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4171 U64_HI(section));
5c862848 4172 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4173 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4174
4175 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4176 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4177 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4178
4179 /* CSTORM */
4180 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4181 c_def_status_block);
34f80b04 4182 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4183
4184 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4185 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4186 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4187 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4188 U64_HI(section));
5c862848 4189 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4190 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4191
4192 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4194 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4195
4196 /* TSTORM */
4197 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4198 t_def_status_block);
34f80b04 4199 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4200
4201 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4202 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4203 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4204 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4205 U64_HI(section));
5c862848 4206 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4207 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4208
4209 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4210 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4211 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4212
4213 /* XSTORM */
4214 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4215 x_def_status_block);
34f80b04 4216 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4217
4218 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4219 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4220 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4221 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4222 U64_HI(section));
5c862848 4223 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4224 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4225
4226 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4227 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4228 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4229
bb2a0f7a 4230 bp->stats_pending = 0;
66e855f3 4231 bp->set_mac_pending = 0;
bb2a0f7a 4232
34f80b04 4233 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4234}
4235
4236static void bnx2x_update_coalesce(struct bnx2x *bp)
4237{
34f80b04 4238 int port = BP_PORT(bp);
a2fbb9ea
ET
4239 int i;
4240
4241 for_each_queue(bp, i) {
34f80b04 4242 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4243
4244 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4245 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4246 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4247 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4248 bp->rx_ticks/12);
a2fbb9ea 4249 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4250 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4251 U_SB_ETH_RX_CQ_INDEX),
4252 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4253
4254 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4255 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4256 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4257 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4258 bp->tx_ticks/12);
a2fbb9ea 4259 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4260 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4261 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4262 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4263 }
4264}
4265
7a9b2557
VZ
4266static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4267 struct bnx2x_fastpath *fp, int last)
4268{
4269 int i;
4270
4271 for (i = 0; i < last; i++) {
4272 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4273 struct sk_buff *skb = rx_buf->skb;
4274
4275 if (skb == NULL) {
4276 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4277 continue;
4278 }
4279
4280 if (fp->tpa_state[i] == BNX2X_TPA_START)
4281 pci_unmap_single(bp->pdev,
4282 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4283 bp->rx_buf_size,
7a9b2557
VZ
4284 PCI_DMA_FROMDEVICE);
4285
4286 dev_kfree_skb(skb);
4287 rx_buf->skb = NULL;
4288 }
4289}
4290
a2fbb9ea
ET
4291static void bnx2x_init_rx_rings(struct bnx2x *bp)
4292{
7a9b2557 4293 int func = BP_FUNC(bp);
32626230
EG
4294 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4295 ETH_MAX_AGGREGATION_QUEUES_E1H;
4296 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4297 int i, j;
a2fbb9ea 4298
0f00846d
EG
4299 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4300 DP(NETIF_MSG_IFUP,
4301 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4302
7a9b2557 4303 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4304
555f6c78 4305 for_each_rx_queue(bp, j) {
32626230 4306 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4307
32626230 4308 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4309 fp->tpa_pool[i].skb =
4310 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4311 if (!fp->tpa_pool[i].skb) {
4312 BNX2X_ERR("Failed to allocate TPA "
4313 "skb pool for queue[%d] - "
4314 "disabling TPA on this "
4315 "queue!\n", j);
4316 bnx2x_free_tpa_pool(bp, fp, i);
4317 fp->disable_tpa = 1;
4318 break;
4319 }
4320 pci_unmap_addr_set((struct sw_rx_bd *)
4321 &bp->fp->tpa_pool[i],
4322 mapping, 0);
4323 fp->tpa_state[i] = BNX2X_TPA_STOP;
4324 }
4325 }
4326 }
4327
555f6c78 4328 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4329 struct bnx2x_fastpath *fp = &bp->fp[j];
4330
4331 fp->rx_bd_cons = 0;
4332 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4333 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4334
4335 /* "next page" elements initialization */
4336 /* SGE ring */
4337 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4338 struct eth_rx_sge *sge;
4339
4340 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4341 sge->addr_hi =
4342 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4343 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4344 sge->addr_lo =
4345 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4346 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4347 }
4348
4349 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4350
7a9b2557 4351 /* RX BD ring */
a2fbb9ea
ET
4352 for (i = 1; i <= NUM_RX_RINGS; i++) {
4353 struct eth_rx_bd *rx_bd;
4354
4355 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4356 rx_bd->addr_hi =
4357 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4358 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4359 rx_bd->addr_lo =
4360 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4361 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4362 }
4363
34f80b04 4364 /* CQ ring */
a2fbb9ea
ET
4365 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4366 struct eth_rx_cqe_next_page *nextpg;
4367
4368 nextpg = (struct eth_rx_cqe_next_page *)
4369 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4370 nextpg->addr_hi =
4371 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4372 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4373 nextpg->addr_lo =
4374 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4375 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4376 }
4377
7a9b2557
VZ
4378 /* Allocate SGEs and initialize the ring elements */
4379 for (i = 0, ring_prod = 0;
4380 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4381
7a9b2557
VZ
4382 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4383 BNX2X_ERR("was only able to allocate "
4384 "%d rx sges\n", i);
4385 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4386 /* Cleanup already allocated elements */
4387 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4388 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4389 fp->disable_tpa = 1;
4390 ring_prod = 0;
4391 break;
4392 }
4393 ring_prod = NEXT_SGE_IDX(ring_prod);
4394 }
4395 fp->rx_sge_prod = ring_prod;
4396
4397 /* Allocate BDs and initialize BD ring */
66e855f3 4398 fp->rx_comp_cons = 0;
7a9b2557 4399 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4400 for (i = 0; i < bp->rx_ring_size; i++) {
4401 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4402 BNX2X_ERR("was only able to allocate "
4403 "%d rx skbs\n", i);
66e855f3 4404 bp->eth_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4405 break;
4406 }
4407 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4408 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4409 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4410 }
4411
7a9b2557
VZ
4412 fp->rx_bd_prod = ring_prod;
4413 /* must not have more available CQEs than BDs */
4414 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4415 cqe_ring_prod);
a2fbb9ea
ET
4416 fp->rx_pkt = fp->rx_calls = 0;
4417
7a9b2557
VZ
4418 /* Warning!
4419 * this will generate an interrupt (to the TSTORM)
4420 * must only be done after chip is initialized
4421 */
4422 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4423 fp->rx_sge_prod);
a2fbb9ea
ET
4424 if (j != 0)
4425 continue;
4426
4427 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4428 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4429 U64_LO(fp->rx_comp_mapping));
4430 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4431 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4432 U64_HI(fp->rx_comp_mapping));
4433 }
4434}
4435
4436static void bnx2x_init_tx_ring(struct bnx2x *bp)
4437{
4438 int i, j;
4439
555f6c78 4440 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
4441 struct bnx2x_fastpath *fp = &bp->fp[j];
4442
4443 for (i = 1; i <= NUM_TX_RINGS; i++) {
4444 struct eth_tx_bd *tx_bd =
4445 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4446
4447 tx_bd->addr_hi =
4448 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4449 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4450 tx_bd->addr_lo =
4451 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4452 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4453 }
4454
4455 fp->tx_pkt_prod = 0;
4456 fp->tx_pkt_cons = 0;
4457 fp->tx_bd_prod = 0;
4458 fp->tx_bd_cons = 0;
4459 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4460 fp->tx_pkt = 0;
4461 }
4462}
4463
4464static void bnx2x_init_sp_ring(struct bnx2x *bp)
4465{
34f80b04 4466 int func = BP_FUNC(bp);
a2fbb9ea
ET
4467
4468 spin_lock_init(&bp->spq_lock);
4469
4470 bp->spq_left = MAX_SPQ_PENDING;
4471 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4472 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4473 bp->spq_prod_bd = bp->spq;
4474 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4475
34f80b04 4476 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4477 U64_LO(bp->spq_mapping));
34f80b04
EG
4478 REG_WR(bp,
4479 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4480 U64_HI(bp->spq_mapping));
4481
34f80b04 4482 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4483 bp->spq_prod_idx);
4484}
4485
4486static void bnx2x_init_context(struct bnx2x *bp)
4487{
4488 int i;
4489
4490 for_each_queue(bp, i) {
4491 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4492 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4493 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea 4494
34f80b04
EG
4495 context->ustorm_st_context.common.sb_index_numbers =
4496 BNX2X_RX_SB_INDEX_NUM;
4497 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4498 context->ustorm_st_context.common.status_block_id = sb_id;
4499 context->ustorm_st_context.common.flags =
4500 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
8d9c5f34 4501 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4502 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4503 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4504 bp->rx_buf_size;
34f80b04 4505 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4506 U64_HI(fp->rx_desc_mapping);
34f80b04 4507 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4508 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4509 if (!fp->disable_tpa) {
4510 context->ustorm_st_context.common.flags |=
4511 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4512 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4513 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4514 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4515 (u32)0xffff);
7a9b2557
VZ
4516 context->ustorm_st_context.common.sge_page_base_hi =
4517 U64_HI(fp->rx_sge_mapping);
4518 context->ustorm_st_context.common.sge_page_base_lo =
4519 U64_LO(fp->rx_sge_mapping);
4520 }
4521
8d9c5f34
EG
4522 context->ustorm_ag_context.cdu_usage =
4523 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4524 CDU_REGION_NUMBER_UCM_AG,
4525 ETH_CONNECTION_TYPE);
4526
4527 context->xstorm_st_context.tx_bd_page_base_hi =
4528 U64_HI(fp->tx_desc_mapping);
4529 context->xstorm_st_context.tx_bd_page_base_lo =
4530 U64_LO(fp->tx_desc_mapping);
4531 context->xstorm_st_context.db_data_addr_hi =
4532 U64_HI(fp->tx_prods_mapping);
4533 context->xstorm_st_context.db_data_addr_lo =
4534 U64_LO(fp->tx_prods_mapping);
4535 context->xstorm_st_context.statistics_data = (fp->cl_id |
4536 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4537 context->cstorm_st_context.sb_index_number =
5c862848 4538 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4539 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4540
4541 context->xstorm_ag_context.cdu_reserved =
4542 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4543 CDU_REGION_NUMBER_XCM_AG,
4544 ETH_CONNECTION_TYPE);
a2fbb9ea
ET
4545 }
4546}
4547
4548static void bnx2x_init_ind_table(struct bnx2x *bp)
4549{
26c8fa4d 4550 int func = BP_FUNC(bp);
a2fbb9ea
ET
4551 int i;
4552
555f6c78 4553 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4554 return;
4555
555f6c78
EG
4556 DP(NETIF_MSG_IFUP,
4557 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4558 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4559 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4560 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
555f6c78 4561 BP_CL_ID(bp) + (i % bp->num_rx_queues));
a2fbb9ea
ET
4562}
4563
49d66772
ET
4564static void bnx2x_set_client_config(struct bnx2x *bp)
4565{
49d66772 4566 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4567 int port = BP_PORT(bp);
4568 int i;
49d66772 4569
e7799c5f 4570 tstorm_client.mtu = bp->dev->mtu;
66e855f3 4571 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
49d66772
ET
4572 tstorm_client.config_flags =
4573 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4574#ifdef BCM_VLAN
0c6671b0 4575 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4576 tstorm_client.config_flags |=
8d9c5f34 4577 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
4578 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4579 }
4580#endif
49d66772 4581
7a9b2557
VZ
4582 if (bp->flags & TPA_ENABLE_FLAG) {
4583 tstorm_client.max_sges_for_packet =
4f40f2cb 4584 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4585 tstorm_client.max_sges_for_packet =
4586 ((tstorm_client.max_sges_for_packet +
4587 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4588 PAGES_PER_SGE_SHIFT;
4589
4590 tstorm_client.config_flags |=
4591 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4592 }
4593
49d66772
ET
4594 for_each_queue(bp, i) {
4595 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4596 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4597 ((u32 *)&tstorm_client)[0]);
4598 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4599 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4600 ((u32 *)&tstorm_client)[1]);
4601 }
4602
34f80b04
EG
4603 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4604 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4605}
4606
a2fbb9ea
ET
4607static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4608{
a2fbb9ea 4609 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4610 int mode = bp->rx_mode;
4611 int mask = (1 << BP_L_ID(bp));
4612 int func = BP_FUNC(bp);
a2fbb9ea
ET
4613 int i;
4614
3196a88a 4615 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4616
4617 switch (mode) {
4618 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4619 tstorm_mac_filter.ucast_drop_all = mask;
4620 tstorm_mac_filter.mcast_drop_all = mask;
4621 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4622 break;
4623 case BNX2X_RX_MODE_NORMAL:
34f80b04 4624 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4625 break;
4626 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4627 tstorm_mac_filter.mcast_accept_all = mask;
4628 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4629 break;
4630 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4631 tstorm_mac_filter.ucast_accept_all = mask;
4632 tstorm_mac_filter.mcast_accept_all = mask;
4633 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4634 break;
4635 default:
34f80b04
EG
4636 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4637 break;
a2fbb9ea
ET
4638 }
4639
4640 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4641 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4642 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4643 ((u32 *)&tstorm_mac_filter)[i]);
4644
34f80b04 4645/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4646 ((u32 *)&tstorm_mac_filter)[i]); */
4647 }
a2fbb9ea 4648
49d66772
ET
4649 if (mode != BNX2X_RX_MODE_NONE)
4650 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4651}
4652
471de716
EG
4653static void bnx2x_init_internal_common(struct bnx2x *bp)
4654{
4655 int i;
4656
3cdf1db7
YG
4657 if (bp->flags & TPA_ENABLE_FLAG) {
4658 struct tstorm_eth_tpa_exist tpa = {0};
4659
4660 tpa.tpa_exist = 1;
4661
4662 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4663 ((u32 *)&tpa)[0]);
4664 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4665 ((u32 *)&tpa)[1]);
4666 }
4667
471de716
EG
4668 /* Zero this manually as its initialization is
4669 currently missing in the initTool */
4670 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4671 REG_WR(bp, BAR_USTRORM_INTMEM +
4672 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4673}
4674
4675static void bnx2x_init_internal_port(struct bnx2x *bp)
4676{
4677 int port = BP_PORT(bp);
4678
4679 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4680 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4682 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4683}
4684
4685static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4686{
a2fbb9ea
ET
4687 struct tstorm_eth_function_common_config tstorm_config = {0};
4688 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4689 int port = BP_PORT(bp);
4690 int func = BP_FUNC(bp);
4691 int i;
471de716 4692 u16 max_agg_size;
a2fbb9ea
ET
4693
4694 if (is_multi(bp)) {
555f6c78 4695 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4696 tstorm_config.rss_result_mask = MULTI_MASK;
4697 }
8d9c5f34
EG
4698 if (IS_E1HMF(bp))
4699 tstorm_config.config_flags |=
4700 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4701
34f80b04
EG
4702 tstorm_config.leading_client_id = BP_L_ID(bp);
4703
a2fbb9ea 4704 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4705 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4706 (*(u32 *)&tstorm_config));
4707
c14423fe 4708 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4709 bnx2x_set_storm_rx_mode(bp);
4710
66e855f3
YG
4711 /* reset xstorm per client statistics */
4712 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4713 REG_WR(bp, BAR_XSTRORM_INTMEM +
4714 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4715 i*4, 0);
4716 }
4717 /* reset tstorm per client statistics */
4718 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4719 REG_WR(bp, BAR_TSTRORM_INTMEM +
4720 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4721 i*4, 0);
4722 }
4723
4724 /* Init statistics related context */
34f80b04 4725 stats_flags.collect_eth = 1;
a2fbb9ea 4726
66e855f3 4727 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4728 ((u32 *)&stats_flags)[0]);
66e855f3 4729 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4730 ((u32 *)&stats_flags)[1]);
4731
66e855f3 4732 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4733 ((u32 *)&stats_flags)[0]);
66e855f3 4734 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4735 ((u32 *)&stats_flags)[1]);
4736
66e855f3 4737 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4738 ((u32 *)&stats_flags)[0]);
66e855f3 4739 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4740 ((u32 *)&stats_flags)[1]);
4741
66e855f3
YG
4742 REG_WR(bp, BAR_XSTRORM_INTMEM +
4743 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4744 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4745 REG_WR(bp, BAR_XSTRORM_INTMEM +
4746 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4747 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4748
4749 REG_WR(bp, BAR_TSTRORM_INTMEM +
4750 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4751 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4752 REG_WR(bp, BAR_TSTRORM_INTMEM +
4753 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4754 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04
EG
4755
4756 if (CHIP_IS_E1H(bp)) {
4757 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4758 IS_E1HMF(bp));
4759 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4760 IS_E1HMF(bp));
4761 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4762 IS_E1HMF(bp));
4763 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4764 IS_E1HMF(bp));
4765
7a9b2557
VZ
4766 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4767 bp->e1hov);
34f80b04
EG
4768 }
4769
4f40f2cb
EG
4770 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4771 max_agg_size =
4772 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4773 SGE_PAGE_SIZE * PAGES_PER_SGE),
4774 (u32)0xffff);
555f6c78 4775 for_each_rx_queue(bp, i) {
7a9b2557 4776 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4777
4778 REG_WR(bp, BAR_USTRORM_INTMEM +
4779 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4780 U64_LO(fp->rx_comp_mapping));
4781 REG_WR(bp, BAR_USTRORM_INTMEM +
4782 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4783 U64_HI(fp->rx_comp_mapping));
4784
7a9b2557
VZ
4785 REG_WR16(bp, BAR_USTRORM_INTMEM +
4786 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4787 max_agg_size);
4788 }
a2fbb9ea
ET
4789}
4790
471de716
EG
4791static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4792{
4793 switch (load_code) {
4794 case FW_MSG_CODE_DRV_LOAD_COMMON:
4795 bnx2x_init_internal_common(bp);
4796 /* no break */
4797
4798 case FW_MSG_CODE_DRV_LOAD_PORT:
4799 bnx2x_init_internal_port(bp);
4800 /* no break */
4801
4802 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4803 bnx2x_init_internal_func(bp);
4804 break;
4805
4806 default:
4807 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4808 break;
4809 }
4810}
4811
4812static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4813{
4814 int i;
4815
4816 for_each_queue(bp, i) {
4817 struct bnx2x_fastpath *fp = &bp->fp[i];
4818
34f80b04 4819 fp->bp = bp;
a2fbb9ea 4820 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4821 fp->index = i;
34f80b04
EG
4822 fp->cl_id = BP_L_ID(bp) + i;
4823 fp->sb_id = fp->cl_id;
4824 DP(NETIF_MSG_IFUP,
4825 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4826 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
4827 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4828 FP_SB_ID(fp));
4829 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
4830 }
4831
5c862848
EG
4832 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4833 DEF_SB_ID);
4834 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
4835 bnx2x_update_coalesce(bp);
4836 bnx2x_init_rx_rings(bp);
4837 bnx2x_init_tx_ring(bp);
4838 bnx2x_init_sp_ring(bp);
4839 bnx2x_init_context(bp);
471de716 4840 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4841 bnx2x_init_ind_table(bp);
0ef00459
EG
4842 bnx2x_stats_init(bp);
4843
4844 /* At this point, we are ready for interrupts */
4845 atomic_set(&bp->intr_sem, 0);
4846
4847 /* flush all before enabling interrupts */
4848 mb();
4849 mmiowb();
4850
615f8fd9 4851 bnx2x_int_enable(bp);
a2fbb9ea
ET
4852}
4853
4854/* end of nic init */
4855
4856/*
4857 * gzip service functions
4858 */
4859
4860static int bnx2x_gunzip_init(struct bnx2x *bp)
4861{
4862 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4863 &bp->gunzip_mapping);
4864 if (bp->gunzip_buf == NULL)
4865 goto gunzip_nomem1;
4866
4867 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4868 if (bp->strm == NULL)
4869 goto gunzip_nomem2;
4870
4871 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4872 GFP_KERNEL);
4873 if (bp->strm->workspace == NULL)
4874 goto gunzip_nomem3;
4875
4876 return 0;
4877
4878gunzip_nomem3:
4879 kfree(bp->strm);
4880 bp->strm = NULL;
4881
4882gunzip_nomem2:
4883 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4884 bp->gunzip_mapping);
4885 bp->gunzip_buf = NULL;
4886
4887gunzip_nomem1:
4888 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4889 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4890 return -ENOMEM;
4891}
4892
4893static void bnx2x_gunzip_end(struct bnx2x *bp)
4894{
4895 kfree(bp->strm->workspace);
4896
4897 kfree(bp->strm);
4898 bp->strm = NULL;
4899
4900 if (bp->gunzip_buf) {
4901 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4902 bp->gunzip_mapping);
4903 bp->gunzip_buf = NULL;
4904 }
4905}
4906
4907static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4908{
4909 int n, rc;
4910
4911 /* check gzip header */
4912 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4913 return -EINVAL;
4914
4915 n = 10;
4916
34f80b04 4917#define FNAME 0x8
a2fbb9ea
ET
4918
4919 if (zbuf[3] & FNAME)
4920 while ((zbuf[n++] != 0) && (n < len));
4921
4922 bp->strm->next_in = zbuf + n;
4923 bp->strm->avail_in = len - n;
4924 bp->strm->next_out = bp->gunzip_buf;
4925 bp->strm->avail_out = FW_BUF_SIZE;
4926
4927 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4928 if (rc != Z_OK)
4929 return rc;
4930
4931 rc = zlib_inflate(bp->strm, Z_FINISH);
4932 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4933 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4934 bp->dev->name, bp->strm->msg);
4935
4936 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4937 if (bp->gunzip_outlen & 0x3)
4938 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4939 " gunzip_outlen (%d) not aligned\n",
4940 bp->dev->name, bp->gunzip_outlen);
4941 bp->gunzip_outlen >>= 2;
4942
4943 zlib_inflateEnd(bp->strm);
4944
4945 if (rc == Z_STREAM_END)
4946 return 0;
4947
4948 return rc;
4949}
4950
4951/* nic load/unload */
4952
4953/*
34f80b04 4954 * General service functions
a2fbb9ea
ET
4955 */
4956
4957/* send a NIG loopback debug packet */
4958static void bnx2x_lb_pckt(struct bnx2x *bp)
4959{
a2fbb9ea 4960 u32 wb_write[3];
a2fbb9ea
ET
4961
4962 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4963 wb_write[0] = 0x55555555;
4964 wb_write[1] = 0x55555555;
34f80b04 4965 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4966 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4967
4968 /* NON-IP protocol */
a2fbb9ea
ET
4969 wb_write[0] = 0x09000000;
4970 wb_write[1] = 0x55555555;
34f80b04 4971 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4972 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4973}
4974
4975/* some of the internal memories
4976 * are not directly readable from the driver
4977 * to test them we send debug packets
4978 */
4979static int bnx2x_int_mem_test(struct bnx2x *bp)
4980{
4981 int factor;
4982 int count, i;
4983 u32 val = 0;
4984
ad8d3948 4985 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4986 factor = 120;
ad8d3948
EG
4987 else if (CHIP_REV_IS_EMUL(bp))
4988 factor = 200;
4989 else
a2fbb9ea 4990 factor = 1;
a2fbb9ea
ET
4991
4992 DP(NETIF_MSG_HW, "start part1\n");
4993
4994 /* Disable inputs of parser neighbor blocks */
4995 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4996 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4997 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4998 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4999
5000 /* Write 0 to parser credits for CFC search request */
5001 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5002
5003 /* send Ethernet packet */
5004 bnx2x_lb_pckt(bp);
5005
5006 /* TODO do i reset NIG statistic? */
5007 /* Wait until NIG register shows 1 packet of size 0x10 */
5008 count = 1000 * factor;
5009 while (count) {
34f80b04 5010
a2fbb9ea
ET
5011 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5012 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5013 if (val == 0x10)
5014 break;
5015
5016 msleep(10);
5017 count--;
5018 }
5019 if (val != 0x10) {
5020 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5021 return -1;
5022 }
5023
5024 /* Wait until PRS register shows 1 packet */
5025 count = 1000 * factor;
5026 while (count) {
5027 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5028 if (val == 1)
5029 break;
5030
5031 msleep(10);
5032 count--;
5033 }
5034 if (val != 0x1) {
5035 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5036 return -2;
5037 }
5038
5039 /* Reset and init BRB, PRS */
34f80b04 5040 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5041 msleep(50);
34f80b04 5042 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5043 msleep(50);
5044 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5045 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5046
5047 DP(NETIF_MSG_HW, "part2\n");
5048
5049 /* Disable inputs of parser neighbor blocks */
5050 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5051 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5052 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5053 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5054
5055 /* Write 0 to parser credits for CFC search request */
5056 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5057
5058 /* send 10 Ethernet packets */
5059 for (i = 0; i < 10; i++)
5060 bnx2x_lb_pckt(bp);
5061
5062 /* Wait until NIG register shows 10 + 1
5063 packets of size 11*0x10 = 0xb0 */
5064 count = 1000 * factor;
5065 while (count) {
34f80b04 5066
a2fbb9ea
ET
5067 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5068 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5069 if (val == 0xb0)
5070 break;
5071
5072 msleep(10);
5073 count--;
5074 }
5075 if (val != 0xb0) {
5076 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5077 return -3;
5078 }
5079
5080 /* Wait until PRS register shows 2 packets */
5081 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5082 if (val != 2)
5083 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5084
5085 /* Write 1 to parser credits for CFC search request */
5086 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5087
5088 /* Wait until PRS register shows 3 packets */
5089 msleep(10 * factor);
5090 /* Wait until NIG register shows 1 packet of size 0x10 */
5091 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5092 if (val != 3)
5093 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5094
5095 /* clear NIG EOP FIFO */
5096 for (i = 0; i < 11; i++)
5097 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5098 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5099 if (val != 1) {
5100 BNX2X_ERR("clear of NIG failed\n");
5101 return -4;
5102 }
5103
5104 /* Reset and init BRB, PRS, NIG */
5105 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5106 msleep(50);
5107 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5108 msleep(50);
5109 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5110 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5111#ifndef BCM_ISCSI
5112 /* set NIC mode */
5113 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5114#endif
5115
5116 /* Enable inputs of parser neighbor blocks */
5117 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5118 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5119 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5120 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5121
5122 DP(NETIF_MSG_HW, "done\n");
5123
5124 return 0; /* OK */
5125}
5126
5127static void enable_blocks_attention(struct bnx2x *bp)
5128{
5129 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5130 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5131 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5132 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5133 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5134 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5135 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5136 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5137 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5138/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5139/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5140 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5141 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5142 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5143/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5144/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5145 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5146 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5147 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5148 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5149/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5150/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5151 if (CHIP_REV_IS_FPGA(bp))
5152 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5153 else
5154 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5155 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5156 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5157 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5158/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5159/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5160 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5161 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5162/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5163 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5164}
5165
34f80b04 5166
81f75bbf
EG
5167static void bnx2x_reset_common(struct bnx2x *bp)
5168{
5169 /* reset_common */
5170 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5171 0xd3ffff7f);
5172 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5173}
5174
34f80b04 5175static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5176{
a2fbb9ea 5177 u32 val, i;
a2fbb9ea 5178
34f80b04 5179 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5180
81f75bbf 5181 bnx2x_reset_common(bp);
34f80b04
EG
5182 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5183 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5184
34f80b04
EG
5185 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5186 if (CHIP_IS_E1H(bp))
5187 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5188
34f80b04
EG
5189 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5190 msleep(30);
5191 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5192
34f80b04
EG
5193 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5194 if (CHIP_IS_E1(bp)) {
5195 /* enable HW interrupt from PXP on USDM overflow
5196 bit 16 on INT_MASK_0 */
5197 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5198 }
a2fbb9ea 5199
34f80b04
EG
5200 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5201 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5202
5203#ifdef __BIG_ENDIAN
34f80b04
EG
5204 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5205 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5206 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5207 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5208 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5209 /* make sure this value is 0 */
5210 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5211
5212/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5213 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5214 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5215 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5216 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5217#endif
5218
34f80b04 5219 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5220#ifdef BCM_ISCSI
34f80b04
EG
5221 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5222 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5223 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5224#endif
5225
34f80b04
EG
5226 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5227 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5228
34f80b04
EG
5229 /* let the HW do it's magic ... */
5230 msleep(100);
5231 /* finish PXP init */
5232 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5233 if (val != 1) {
5234 BNX2X_ERR("PXP2 CFG failed\n");
5235 return -EBUSY;
5236 }
5237 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5238 if (val != 1) {
5239 BNX2X_ERR("PXP2 RD_INIT failed\n");
5240 return -EBUSY;
5241 }
a2fbb9ea 5242
34f80b04
EG
5243 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5244 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5245
34f80b04 5246 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5247
34f80b04
EG
5248 /* clean the DMAE memory */
5249 bp->dmae_ready = 1;
5250 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5251
34f80b04
EG
5252 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5253 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5254 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5255 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5256
34f80b04
EG
5257 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5258 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5259 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5260 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5261
5262 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5263 /* soft reset pulse */
5264 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5265 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5266
5267#ifdef BCM_ISCSI
34f80b04 5268 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5269#endif
a2fbb9ea 5270
34f80b04
EG
5271 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5272 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5273 if (!CHIP_REV_IS_SLOW(bp)) {
5274 /* enable hw interrupt from doorbell Q */
5275 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5276 }
a2fbb9ea 5277
34f80b04
EG
5278 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5279 if (CHIP_REV_IS_SLOW(bp)) {
5280 /* fix for emulation and FPGA for no pause */
5281 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5282 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5283 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5284 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5285 }
a2fbb9ea 5286
34f80b04 5287 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5288 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5289 /* set NIC mode */
5290 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5291 if (CHIP_IS_E1H(bp))
5292 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5293
34f80b04
EG
5294 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5295 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5296 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5297 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5298
34f80b04
EG
5299 if (CHIP_IS_E1H(bp)) {
5300 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5301 STORM_INTMEM_SIZE_E1H/2);
5302 bnx2x_init_fill(bp,
5303 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5304 0, STORM_INTMEM_SIZE_E1H/2);
5305 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5306 STORM_INTMEM_SIZE_E1H/2);
5307 bnx2x_init_fill(bp,
5308 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5309 0, STORM_INTMEM_SIZE_E1H/2);
5310 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5311 STORM_INTMEM_SIZE_E1H/2);
5312 bnx2x_init_fill(bp,
5313 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5314 0, STORM_INTMEM_SIZE_E1H/2);
5315 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5316 STORM_INTMEM_SIZE_E1H/2);
5317 bnx2x_init_fill(bp,
5318 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5319 0, STORM_INTMEM_SIZE_E1H/2);
5320 } else { /* E1 */
ad8d3948
EG
5321 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5322 STORM_INTMEM_SIZE_E1);
5323 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5324 STORM_INTMEM_SIZE_E1);
5325 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5326 STORM_INTMEM_SIZE_E1);
5327 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5328 STORM_INTMEM_SIZE_E1);
34f80b04 5329 }
a2fbb9ea 5330
34f80b04
EG
5331 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5332 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5333 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5334 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5335
34f80b04
EG
5336 /* sync semi rtc */
5337 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5338 0x80000000);
5339 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5340 0x80000000);
a2fbb9ea 5341
34f80b04
EG
5342 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5343 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5344 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5345
34f80b04
EG
5346 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5347 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5348 REG_WR(bp, i, 0xc0cac01a);
5349 /* TODO: replace with something meaningful */
5350 }
8d9c5f34 5351 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5352 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5353
34f80b04
EG
5354 if (sizeof(union cdu_context) != 1024)
5355 /* we currently assume that a context is 1024 bytes */
5356 printk(KERN_ALERT PFX "please adjust the size of"
5357 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5358
34f80b04
EG
5359 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5360 val = (4 << 24) + (0 << 12) + 1024;
5361 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5362 if (CHIP_IS_E1(bp)) {
5363 /* !!! fix pxp client crdit until excel update */
5364 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5365 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5366 }
a2fbb9ea 5367
34f80b04
EG
5368 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5369 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5370 /* enable context validation interrupt from CFC */
5371 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5372
5373 /* set the thresholds to prevent CFC/CDU race */
5374 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5375
34f80b04
EG
5376 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5377 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5378
34f80b04
EG
5379 /* PXPCS COMMON comes here */
5380 /* Reset PCIE errors for debug */
5381 REG_WR(bp, 0x2814, 0xffffffff);
5382 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5383
34f80b04
EG
5384 /* EMAC0 COMMON comes here */
5385 /* EMAC1 COMMON comes here */
5386 /* DBU COMMON comes here */
5387 /* DBG COMMON comes here */
5388
5389 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5390 if (CHIP_IS_E1H(bp)) {
5391 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5392 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5393 }
5394
5395 if (CHIP_REV_IS_SLOW(bp))
5396 msleep(200);
5397
5398 /* finish CFC init */
5399 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5400 if (val != 1) {
5401 BNX2X_ERR("CFC LL_INIT failed\n");
5402 return -EBUSY;
5403 }
5404 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5405 if (val != 1) {
5406 BNX2X_ERR("CFC AC_INIT failed\n");
5407 return -EBUSY;
5408 }
5409 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5410 if (val != 1) {
5411 BNX2X_ERR("CFC CAM_INIT failed\n");
5412 return -EBUSY;
5413 }
5414 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5415
34f80b04
EG
5416 /* read NIG statistic
5417 to see if this is our first up since powerup */
5418 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5419 val = *bnx2x_sp(bp, wb_data[0]);
5420
5421 /* do internal memory self test */
5422 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5423 BNX2X_ERR("internal mem self test failed\n");
5424 return -EBUSY;
5425 }
5426
5427 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5428 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
34f80b04
EG
5429 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5430 /* Fan failure is indicated by SPIO 5 */
5431 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5432 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5433
5434 /* set to active low mode */
5435 val = REG_RD(bp, MISC_REG_SPIO_INT);
5436 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5437 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5438 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5439
34f80b04
EG
5440 /* enable interrupt to signal the IGU */
5441 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5442 val |= (1 << MISC_REGISTERS_SPIO_5);
5443 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5444 break;
f1410647 5445
34f80b04
EG
5446 default:
5447 break;
5448 }
f1410647 5449
34f80b04
EG
5450 /* clear PXP2 attentions */
5451 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5452
34f80b04 5453 enable_blocks_attention(bp);
a2fbb9ea 5454
6bbca910
YR
5455 if (!BP_NOMCP(bp)) {
5456 bnx2x_acquire_phy_lock(bp);
5457 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5458 bnx2x_release_phy_lock(bp);
5459 } else
5460 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5461
34f80b04
EG
5462 return 0;
5463}
a2fbb9ea 5464
34f80b04
EG
5465static int bnx2x_init_port(struct bnx2x *bp)
5466{
5467 int port = BP_PORT(bp);
5468 u32 val;
a2fbb9ea 5469
34f80b04
EG
5470 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5471
5472 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5473
5474 /* Port PXP comes here */
5475 /* Port PXP2 comes here */
a2fbb9ea
ET
5476#ifdef BCM_ISCSI
5477 /* Port0 1
5478 * Port1 385 */
5479 i++;
5480 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5481 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5482 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5483 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5484
5485 /* Port0 2
5486 * Port1 386 */
5487 i++;
5488 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5489 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5490 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5491 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5492
5493 /* Port0 3
5494 * Port1 387 */
5495 i++;
5496 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5497 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5498 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5499 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5500#endif
34f80b04 5501 /* Port CMs come here */
8d9c5f34
EG
5502 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5503 (port ? XCM_PORT1_END : XCM_PORT0_END));
a2fbb9ea
ET
5504
5505 /* Port QM comes here */
a2fbb9ea
ET
5506#ifdef BCM_ISCSI
5507 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5508 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5509
5510 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5511 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5512#endif
5513 /* Port DQ comes here */
5514 /* Port BRB1 comes here */
ad8d3948 5515 /* Port PRS comes here */
a2fbb9ea
ET
5516 /* Port TSDM comes here */
5517 /* Port CSDM comes here */
5518 /* Port USDM comes here */
5519 /* Port XSDM comes here */
34f80b04
EG
5520 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5521 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5522 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5523 port ? USEM_PORT1_END : USEM_PORT0_END);
5524 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5525 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5526 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5527 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5528 /* Port UPB comes here */
34f80b04
EG
5529 /* Port XPB comes here */
5530
5531 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5532 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5533
5534 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5535 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5536
5537 /* update threshold */
34f80b04 5538 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5539 /* update init credit */
34f80b04 5540 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5541
5542 /* probe changes */
34f80b04 5543 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5544 msleep(5);
34f80b04 5545 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5546
5547#ifdef BCM_ISCSI
5548 /* tell the searcher where the T2 table is */
5549 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5550
5551 wb_write[0] = U64_LO(bp->t2_mapping);
5552 wb_write[1] = U64_HI(bp->t2_mapping);
5553 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5554 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5555 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5556 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5557
5558 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5559 /* Port SRCH comes here */
5560#endif
5561 /* Port CDU comes here */
5562 /* Port CFC comes here */
34f80b04
EG
5563
5564 if (CHIP_IS_E1(bp)) {
5565 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5566 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5567 }
5568 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5569 port ? HC_PORT1_END : HC_PORT0_END);
5570
5571 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5572 MISC_AEU_PORT0_START,
34f80b04
EG
5573 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5574 /* init aeu_mask_attn_func_0/1:
5575 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5576 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5577 * bits 4-7 are used for "per vn group attention" */
5578 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5579 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5580
a2fbb9ea
ET
5581 /* Port PXPCS comes here */
5582 /* Port EMAC0 comes here */
5583 /* Port EMAC1 comes here */
5584 /* Port DBU comes here */
5585 /* Port DBG comes here */
34f80b04
EG
5586 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5587 port ? NIG_PORT1_END : NIG_PORT0_END);
5588
5589 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5590
5591 if (CHIP_IS_E1H(bp)) {
5592 u32 wsum;
5593 struct cmng_struct_per_port m_cmng_port;
5594 int vn;
5595
5596 /* 0x2 disable e1hov, 0x1 enable */
5597 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5598 (IS_E1HMF(bp) ? 0x1 : 0x2));
5599
5600 /* Init RATE SHAPING and FAIRNESS contexts.
5601 Initialize as if there is 10G link. */
5602 wsum = bnx2x_calc_vn_wsum(bp);
5603 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5604 if (IS_E1HMF(bp))
5605 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5606 bnx2x_init_vn_minmax(bp, 2*vn + port,
5607 wsum, 10000, &m_cmng_port);
5608 }
5609
a2fbb9ea
ET
5610 /* Port MCP comes here */
5611 /* Port DMAE comes here */
5612
34f80b04 5613 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5614 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
f1410647
ET
5615 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5616 /* add SPIO 5 to group 0 */
5617 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5618 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5619 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5620 break;
5621
5622 default:
5623 break;
5624 }
5625
c18487ee 5626 bnx2x__link_reset(bp);
a2fbb9ea 5627
34f80b04
EG
5628 return 0;
5629}
5630
5631#define ILT_PER_FUNC (768/2)
5632#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5633/* the phys address is shifted right 12 bits and has an added
5634 1=valid bit added to the 53rd bit
5635 then since this is a wide register(TM)
5636 we split it into two 32 bit writes
5637 */
5638#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5639#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5640#define PXP_ONE_ILT(x) (((x) << 10) | x)
5641#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5642
5643#define CNIC_ILT_LINES 0
5644
5645static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5646{
5647 int reg;
5648
5649 if (CHIP_IS_E1H(bp))
5650 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5651 else /* E1 */
5652 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5653
5654 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5655}
5656
5657static int bnx2x_init_func(struct bnx2x *bp)
5658{
5659 int port = BP_PORT(bp);
5660 int func = BP_FUNC(bp);
8badd27a 5661 u32 addr, val;
34f80b04
EG
5662 int i;
5663
5664 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5665
8badd27a
EG
5666 /* set MSI reconfigure capability */
5667 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5668 val = REG_RD(bp, addr);
5669 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5670 REG_WR(bp, addr, val);
5671
34f80b04
EG
5672 i = FUNC_ILT_BASE(func);
5673
5674 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5675 if (CHIP_IS_E1H(bp)) {
5676 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5677 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5678 } else /* E1 */
5679 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5680 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5681
5682
5683 if (CHIP_IS_E1H(bp)) {
5684 for (i = 0; i < 9; i++)
5685 bnx2x_init_block(bp,
5686 cm_start[func][i], cm_end[func][i]);
5687
5688 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5689 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5690 }
5691
5692 /* HC init per function */
5693 if (CHIP_IS_E1H(bp)) {
5694 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5695
5696 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5697 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5698 }
5699 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5700
c14423fe 5701 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5702 REG_WR(bp, 0x2114, 0xffffffff);
5703 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5704
34f80b04
EG
5705 return 0;
5706}
5707
5708static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5709{
5710 int i, rc = 0;
a2fbb9ea 5711
34f80b04
EG
5712 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5713 BP_FUNC(bp), load_code);
a2fbb9ea 5714
34f80b04
EG
5715 bp->dmae_ready = 0;
5716 mutex_init(&bp->dmae_mutex);
5717 bnx2x_gunzip_init(bp);
a2fbb9ea 5718
34f80b04
EG
5719 switch (load_code) {
5720 case FW_MSG_CODE_DRV_LOAD_COMMON:
5721 rc = bnx2x_init_common(bp);
5722 if (rc)
5723 goto init_hw_err;
5724 /* no break */
5725
5726 case FW_MSG_CODE_DRV_LOAD_PORT:
5727 bp->dmae_ready = 1;
5728 rc = bnx2x_init_port(bp);
5729 if (rc)
5730 goto init_hw_err;
5731 /* no break */
5732
5733 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5734 bp->dmae_ready = 1;
5735 rc = bnx2x_init_func(bp);
5736 if (rc)
5737 goto init_hw_err;
5738 break;
5739
5740 default:
5741 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5742 break;
5743 }
5744
5745 if (!BP_NOMCP(bp)) {
5746 int func = BP_FUNC(bp);
a2fbb9ea
ET
5747
5748 bp->fw_drv_pulse_wr_seq =
34f80b04 5749 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5750 DRV_PULSE_SEQ_MASK);
34f80b04
EG
5751 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5752 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5753 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5754 } else
5755 bp->func_stx = 0;
a2fbb9ea 5756
34f80b04
EG
5757 /* this needs to be done before gunzip end */
5758 bnx2x_zero_def_sb(bp);
5759 for_each_queue(bp, i)
5760 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5761
5762init_hw_err:
5763 bnx2x_gunzip_end(bp);
5764
5765 return rc;
a2fbb9ea
ET
5766}
5767
c14423fe 5768/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5769static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5770{
34f80b04 5771 int func = BP_FUNC(bp);
f1410647
ET
5772 u32 seq = ++bp->fw_seq;
5773 u32 rc = 0;
19680c48
EG
5774 u32 cnt = 1;
5775 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5776
34f80b04 5777 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5778 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5779
19680c48
EG
5780 do {
5781 /* let the FW do it's magic ... */
5782 msleep(delay);
a2fbb9ea 5783
19680c48 5784 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5785
19680c48
EG
5786 /* Give the FW up to 2 second (200*10ms) */
5787 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5788
5789 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5790 cnt*delay, rc, seq);
a2fbb9ea
ET
5791
5792 /* is this a reply to our command? */
5793 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5794 rc &= FW_MSG_CODE_MASK;
f1410647 5795
a2fbb9ea
ET
5796 } else {
5797 /* FW BUG! */
5798 BNX2X_ERR("FW failed to respond!\n");
5799 bnx2x_fw_dump(bp);
5800 rc = 0;
5801 }
f1410647 5802
a2fbb9ea
ET
5803 return rc;
5804}
5805
5806static void bnx2x_free_mem(struct bnx2x *bp)
5807{
5808
5809#define BNX2X_PCI_FREE(x, y, size) \
5810 do { \
5811 if (x) { \
5812 pci_free_consistent(bp->pdev, size, x, y); \
5813 x = NULL; \
5814 y = 0; \
5815 } \
5816 } while (0)
5817
5818#define BNX2X_FREE(x) \
5819 do { \
5820 if (x) { \
5821 vfree(x); \
5822 x = NULL; \
5823 } \
5824 } while (0)
5825
5826 int i;
5827
5828 /* fastpath */
555f6c78 5829 /* Common */
a2fbb9ea
ET
5830 for_each_queue(bp, i) {
5831
555f6c78 5832 /* status blocks */
a2fbb9ea
ET
5833 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5834 bnx2x_fp(bp, i, status_blk_mapping),
5835 sizeof(struct host_status_block) +
5836 sizeof(struct eth_tx_db_data));
555f6c78
EG
5837 }
5838 /* Rx */
5839 for_each_rx_queue(bp, i) {
a2fbb9ea 5840
555f6c78 5841 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
5842 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5843 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5844 bnx2x_fp(bp, i, rx_desc_mapping),
5845 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5846
5847 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5848 bnx2x_fp(bp, i, rx_comp_mapping),
5849 sizeof(struct eth_fast_path_rx_cqe) *
5850 NUM_RCQ_BD);
a2fbb9ea 5851
7a9b2557 5852 /* SGE ring */
32626230 5853 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
5854 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5855 bnx2x_fp(bp, i, rx_sge_mapping),
5856 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5857 }
555f6c78
EG
5858 /* Tx */
5859 for_each_tx_queue(bp, i) {
5860
5861 /* fastpath tx rings: tx_buf tx_desc */
5862 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5863 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5864 bnx2x_fp(bp, i, tx_desc_mapping),
5865 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5866 }
a2fbb9ea
ET
5867 /* end of fastpath */
5868
5869 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5870 sizeof(struct host_def_status_block));
a2fbb9ea
ET
5871
5872 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5873 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
5874
5875#ifdef BCM_ISCSI
5876 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5877 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5878 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5879 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5880#endif
7a9b2557 5881 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
5882
5883#undef BNX2X_PCI_FREE
5884#undef BNX2X_KFREE
5885}
5886
5887static int bnx2x_alloc_mem(struct bnx2x *bp)
5888{
5889
5890#define BNX2X_PCI_ALLOC(x, y, size) \
5891 do { \
5892 x = pci_alloc_consistent(bp->pdev, size, y); \
5893 if (x == NULL) \
5894 goto alloc_mem_err; \
5895 memset(x, 0, size); \
5896 } while (0)
5897
5898#define BNX2X_ALLOC(x, size) \
5899 do { \
5900 x = vmalloc(size); \
5901 if (x == NULL) \
5902 goto alloc_mem_err; \
5903 memset(x, 0, size); \
5904 } while (0)
5905
5906 int i;
5907
5908 /* fastpath */
555f6c78 5909 /* Common */
a2fbb9ea
ET
5910 for_each_queue(bp, i) {
5911 bnx2x_fp(bp, i, bp) = bp;
5912
555f6c78 5913 /* status blocks */
a2fbb9ea
ET
5914 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5915 &bnx2x_fp(bp, i, status_blk_mapping),
5916 sizeof(struct host_status_block) +
5917 sizeof(struct eth_tx_db_data));
555f6c78
EG
5918 }
5919 /* Rx */
5920 for_each_rx_queue(bp, i) {
a2fbb9ea 5921
555f6c78 5922 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
5923 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5924 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5925 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5926 &bnx2x_fp(bp, i, rx_desc_mapping),
5927 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5928
5929 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5930 &bnx2x_fp(bp, i, rx_comp_mapping),
5931 sizeof(struct eth_fast_path_rx_cqe) *
5932 NUM_RCQ_BD);
5933
7a9b2557
VZ
5934 /* SGE ring */
5935 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5936 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5937 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5938 &bnx2x_fp(bp, i, rx_sge_mapping),
5939 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 5940 }
555f6c78
EG
5941 /* Tx */
5942 for_each_tx_queue(bp, i) {
5943
5944 bnx2x_fp(bp, i, hw_tx_prods) =
5945 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5946
5947 bnx2x_fp(bp, i, tx_prods_mapping) =
5948 bnx2x_fp(bp, i, status_blk_mapping) +
5949 sizeof(struct host_status_block);
5950
5951 /* fastpath tx rings: tx_buf tx_desc */
5952 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5953 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5954 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5955 &bnx2x_fp(bp, i, tx_desc_mapping),
5956 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5957 }
a2fbb9ea
ET
5958 /* end of fastpath */
5959
5960 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5961 sizeof(struct host_def_status_block));
5962
5963 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5964 sizeof(struct bnx2x_slowpath));
5965
5966#ifdef BCM_ISCSI
5967 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5968
5969 /* Initialize T1 */
5970 for (i = 0; i < 64*1024; i += 64) {
5971 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5972 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5973 }
5974
5975 /* allocate searcher T2 table
5976 we allocate 1/4 of alloc num for T2
5977 (which is not entered into the ILT) */
5978 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5979
5980 /* Initialize T2 */
5981 for (i = 0; i < 16*1024; i += 64)
5982 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5983
c14423fe 5984 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
5985 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5986
5987 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5988 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5989
5990 /* QM queues (128*MAX_CONN) */
5991 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5992#endif
5993
5994 /* Slow path ring */
5995 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5996
5997 return 0;
5998
5999alloc_mem_err:
6000 bnx2x_free_mem(bp);
6001 return -ENOMEM;
6002
6003#undef BNX2X_PCI_ALLOC
6004#undef BNX2X_ALLOC
6005}
6006
6007static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6008{
6009 int i;
6010
555f6c78 6011 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6012 struct bnx2x_fastpath *fp = &bp->fp[i];
6013
6014 u16 bd_cons = fp->tx_bd_cons;
6015 u16 sw_prod = fp->tx_pkt_prod;
6016 u16 sw_cons = fp->tx_pkt_cons;
6017
a2fbb9ea
ET
6018 while (sw_cons != sw_prod) {
6019 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6020 sw_cons++;
6021 }
6022 }
6023}
6024
6025static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6026{
6027 int i, j;
6028
555f6c78 6029 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6030 struct bnx2x_fastpath *fp = &bp->fp[j];
6031
a2fbb9ea
ET
6032 for (i = 0; i < NUM_RX_BD; i++) {
6033 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6034 struct sk_buff *skb = rx_buf->skb;
6035
6036 if (skb == NULL)
6037 continue;
6038
6039 pci_unmap_single(bp->pdev,
6040 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6041 bp->rx_buf_size,
a2fbb9ea
ET
6042 PCI_DMA_FROMDEVICE);
6043
6044 rx_buf->skb = NULL;
6045 dev_kfree_skb(skb);
6046 }
7a9b2557 6047 if (!fp->disable_tpa)
32626230
EG
6048 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6049 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6050 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6051 }
6052}
6053
6054static void bnx2x_free_skbs(struct bnx2x *bp)
6055{
6056 bnx2x_free_tx_skbs(bp);
6057 bnx2x_free_rx_skbs(bp);
6058}
6059
6060static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6061{
34f80b04 6062 int i, offset = 1;
a2fbb9ea
ET
6063
6064 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6065 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6066 bp->msix_table[0].vector);
6067
6068 for_each_queue(bp, i) {
c14423fe 6069 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6070 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6071 bnx2x_fp(bp, i, state));
6072
34f80b04 6073 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6074 }
a2fbb9ea
ET
6075}
6076
6077static void bnx2x_free_irq(struct bnx2x *bp)
6078{
a2fbb9ea 6079 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6080 bnx2x_free_msix_irqs(bp);
6081 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6082 bp->flags &= ~USING_MSIX_FLAG;
6083
8badd27a
EG
6084 } else if (bp->flags & USING_MSI_FLAG) {
6085 free_irq(bp->pdev->irq, bp->dev);
6086 pci_disable_msi(bp->pdev);
6087 bp->flags &= ~USING_MSI_FLAG;
6088
a2fbb9ea
ET
6089 } else
6090 free_irq(bp->pdev->irq, bp->dev);
6091}
6092
6093static int bnx2x_enable_msix(struct bnx2x *bp)
6094{
8badd27a
EG
6095 int i, rc, offset = 1;
6096 int igu_vec = 0;
a2fbb9ea 6097
8badd27a
EG
6098 bp->msix_table[0].entry = igu_vec;
6099 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6100
34f80b04 6101 for_each_queue(bp, i) {
8badd27a 6102 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6103 bp->msix_table[i + offset].entry = igu_vec;
6104 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6105 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6106 }
6107
34f80b04 6108 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6109 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6110 if (rc) {
8badd27a
EG
6111 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6112 return rc;
34f80b04 6113 }
8badd27a 6114
a2fbb9ea
ET
6115 bp->flags |= USING_MSIX_FLAG;
6116
6117 return 0;
a2fbb9ea
ET
6118}
6119
a2fbb9ea
ET
6120static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6121{
34f80b04 6122 int i, rc, offset = 1;
a2fbb9ea 6123
a2fbb9ea
ET
6124 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6125 bp->dev->name, bp->dev);
a2fbb9ea
ET
6126 if (rc) {
6127 BNX2X_ERR("request sp irq failed\n");
6128 return -EBUSY;
6129 }
6130
6131 for_each_queue(bp, i) {
555f6c78
EG
6132 struct bnx2x_fastpath *fp = &bp->fp[i];
6133
6134 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6135 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6136 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6137 if (rc) {
555f6c78 6138 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6139 bnx2x_free_msix_irqs(bp);
6140 return -EBUSY;
6141 }
6142
555f6c78 6143 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6144 }
6145
555f6c78
EG
6146 i = BNX2X_NUM_QUEUES(bp);
6147 if (is_multi(bp))
6148 printk(KERN_INFO PFX
6149 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6150 bp->dev->name, bp->msix_table[0].vector,
6151 bp->msix_table[offset].vector,
6152 bp->msix_table[offset + i - 1].vector);
6153 else
6154 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6155 bp->dev->name, bp->msix_table[0].vector,
6156 bp->msix_table[offset + i - 1].vector);
6157
a2fbb9ea 6158 return 0;
a2fbb9ea
ET
6159}
6160
8badd27a
EG
6161static int bnx2x_enable_msi(struct bnx2x *bp)
6162{
6163 int rc;
6164
6165 rc = pci_enable_msi(bp->pdev);
6166 if (rc) {
6167 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6168 return -1;
6169 }
6170 bp->flags |= USING_MSI_FLAG;
6171
6172 return 0;
6173}
6174
a2fbb9ea
ET
6175static int bnx2x_req_irq(struct bnx2x *bp)
6176{
8badd27a 6177 unsigned long flags;
34f80b04 6178 int rc;
a2fbb9ea 6179
8badd27a
EG
6180 if (bp->flags & USING_MSI_FLAG)
6181 flags = 0;
6182 else
6183 flags = IRQF_SHARED;
6184
6185 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6186 bp->dev->name, bp->dev);
a2fbb9ea
ET
6187 if (!rc)
6188 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6189
6190 return rc;
a2fbb9ea
ET
6191}
6192
65abd74d
YG
6193static void bnx2x_napi_enable(struct bnx2x *bp)
6194{
6195 int i;
6196
555f6c78 6197 for_each_rx_queue(bp, i)
65abd74d
YG
6198 napi_enable(&bnx2x_fp(bp, i, napi));
6199}
6200
6201static void bnx2x_napi_disable(struct bnx2x *bp)
6202{
6203 int i;
6204
555f6c78 6205 for_each_rx_queue(bp, i)
65abd74d
YG
6206 napi_disable(&bnx2x_fp(bp, i, napi));
6207}
6208
6209static void bnx2x_netif_start(struct bnx2x *bp)
6210{
6211 if (atomic_dec_and_test(&bp->intr_sem)) {
6212 if (netif_running(bp->dev)) {
65abd74d
YG
6213 bnx2x_napi_enable(bp);
6214 bnx2x_int_enable(bp);
555f6c78
EG
6215 if (bp->state == BNX2X_STATE_OPEN)
6216 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6217 }
6218 }
6219}
6220
f8ef6e44 6221static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6222{
f8ef6e44 6223 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6224 bnx2x_napi_disable(bp);
65abd74d 6225 if (netif_running(bp->dev)) {
65abd74d
YG
6226 netif_tx_disable(bp->dev);
6227 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6228 }
6229}
6230
a2fbb9ea
ET
6231/*
6232 * Init service functions
6233 */
6234
3101c2bc 6235static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6236{
6237 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6238 int port = BP_PORT(bp);
a2fbb9ea
ET
6239
6240 /* CAM allocation
6241 * unicasts 0-31:port0 32-63:port1
6242 * multicast 64-127:port0 128-191:port1
6243 */
8d9c5f34 6244 config->hdr.length = 2;
af246401 6245 config->hdr.offset = port ? 32 : 0;
34f80b04 6246 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6247 config->hdr.reserved1 = 0;
6248
6249 /* primary MAC */
6250 config->config_table[0].cam_entry.msb_mac_addr =
6251 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6252 config->config_table[0].cam_entry.middle_mac_addr =
6253 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6254 config->config_table[0].cam_entry.lsb_mac_addr =
6255 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6256 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6257 if (set)
6258 config->config_table[0].target_table_entry.flags = 0;
6259 else
6260 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6261 config->config_table[0].target_table_entry.client_id = 0;
6262 config->config_table[0].target_table_entry.vlan_id = 0;
6263
3101c2bc
YG
6264 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6265 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6266 config->config_table[0].cam_entry.msb_mac_addr,
6267 config->config_table[0].cam_entry.middle_mac_addr,
6268 config->config_table[0].cam_entry.lsb_mac_addr);
6269
6270 /* broadcast */
6271 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6272 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6273 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6274 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6275 if (set)
6276 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6277 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6278 else
6279 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6280 config->config_table[1].target_table_entry.client_id = 0;
6281 config->config_table[1].target_table_entry.vlan_id = 0;
6282
6283 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6284 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6285 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6286}
6287
3101c2bc 6288static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6289{
6290 struct mac_configuration_cmd_e1h *config =
6291 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6292
3101c2bc 6293 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04
EG
6294 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6295 return;
6296 }
6297
6298 /* CAM allocation for E1H
6299 * unicasts: by func number
6300 * multicast: 20+FUNC*20, 20 each
6301 */
8d9c5f34 6302 config->hdr.length = 1;
34f80b04
EG
6303 config->hdr.offset = BP_FUNC(bp);
6304 config->hdr.client_id = BP_CL_ID(bp);
6305 config->hdr.reserved1 = 0;
6306
6307 /* primary MAC */
6308 config->config_table[0].msb_mac_addr =
6309 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6310 config->config_table[0].middle_mac_addr =
6311 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6312 config->config_table[0].lsb_mac_addr =
6313 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6314 config->config_table[0].client_id = BP_L_ID(bp);
6315 config->config_table[0].vlan_id = 0;
6316 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6317 if (set)
6318 config->config_table[0].flags = BP_PORT(bp);
6319 else
6320 config->config_table[0].flags =
6321 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6322
3101c2bc
YG
6323 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6324 (set ? "setting" : "clearing"),
34f80b04
EG
6325 config->config_table[0].msb_mac_addr,
6326 config->config_table[0].middle_mac_addr,
6327 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6328
6329 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6330 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6331 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6332}
6333
a2fbb9ea
ET
6334static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6335 int *state_p, int poll)
6336{
6337 /* can take a while if any port is running */
34f80b04 6338 int cnt = 500;
a2fbb9ea 6339
c14423fe
ET
6340 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6341 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6342
6343 might_sleep();
34f80b04 6344 while (cnt--) {
a2fbb9ea
ET
6345 if (poll) {
6346 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6347 /* if index is different from 0
6348 * the reply for some commands will
3101c2bc 6349 * be on the non default queue
a2fbb9ea
ET
6350 */
6351 if (idx)
6352 bnx2x_rx_int(&bp->fp[idx], 10);
6353 }
a2fbb9ea 6354
3101c2bc 6355 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6356 if (*state_p == state)
a2fbb9ea
ET
6357 return 0;
6358
a2fbb9ea 6359 msleep(1);
a2fbb9ea
ET
6360 }
6361
a2fbb9ea 6362 /* timeout! */
49d66772
ET
6363 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6364 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6365#ifdef BNX2X_STOP_ON_ERROR
6366 bnx2x_panic();
6367#endif
a2fbb9ea 6368
49d66772 6369 return -EBUSY;
a2fbb9ea
ET
6370}
6371
6372static int bnx2x_setup_leading(struct bnx2x *bp)
6373{
34f80b04 6374 int rc;
a2fbb9ea 6375
c14423fe 6376 /* reset IGU state */
34f80b04 6377 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6378
6379 /* SETUP ramrod */
6380 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6381
34f80b04
EG
6382 /* Wait for completion */
6383 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6384
34f80b04 6385 return rc;
a2fbb9ea
ET
6386}
6387
6388static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6389{
555f6c78
EG
6390 struct bnx2x_fastpath *fp = &bp->fp[index];
6391
a2fbb9ea 6392 /* reset IGU state */
555f6c78 6393 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6394
228241eb 6395 /* SETUP ramrod */
555f6c78
EG
6396 fp->state = BNX2X_FP_STATE_OPENING;
6397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6398 fp->cl_id, 0);
a2fbb9ea
ET
6399
6400 /* Wait for completion */
6401 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6402 &(fp->state), 0);
a2fbb9ea
ET
6403}
6404
a2fbb9ea 6405static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6406
8badd27a 6407static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6408{
555f6c78 6409 int num_queues;
a2fbb9ea 6410
8badd27a
EG
6411 switch (int_mode) {
6412 case INT_MODE_INTx:
6413 case INT_MODE_MSI:
555f6c78
EG
6414 num_queues = 1;
6415 bp->num_rx_queues = num_queues;
6416 bp->num_tx_queues = num_queues;
6417 DP(NETIF_MSG_IFUP,
6418 "set number of queues to %d\n", num_queues);
8badd27a
EG
6419 break;
6420
6421 case INT_MODE_MSIX:
6422 default:
555f6c78
EG
6423 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6424 num_queues = min_t(u32, num_online_cpus(),
6425 BNX2X_MAX_QUEUES(bp));
34f80b04 6426 else
555f6c78
EG
6427 num_queues = 1;
6428 bp->num_rx_queues = num_queues;
6429 bp->num_tx_queues = num_queues;
6430 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6431 " number of tx queues to %d\n",
6432 bp->num_rx_queues, bp->num_tx_queues);
2dfe0e1f
EG
6433 /* if we can't use MSI-X we only need one fp,
6434 * so try to enable MSI-X with the requested number of fp's
6435 * and fallback to MSI or legacy INTx with one fp
6436 */
8badd27a 6437 if (bnx2x_enable_msix(bp)) {
34f80b04 6438 /* failed to enable MSI-X */
555f6c78
EG
6439 num_queues = 1;
6440 bp->num_rx_queues = num_queues;
6441 bp->num_tx_queues = num_queues;
6442 if (bp->multi_mode)
6443 BNX2X_ERR("Multi requested but failed to "
6444 "enable MSI-X set number of "
6445 "queues to %d\n", num_queues);
a2fbb9ea 6446 }
8badd27a 6447 break;
a2fbb9ea 6448 }
555f6c78 6449 bp->dev->real_num_tx_queues = bp->num_tx_queues;
8badd27a
EG
6450}
6451
6452static void bnx2x_set_rx_mode(struct net_device *dev);
6453
6454/* must be called with rtnl_lock */
6455static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6456{
6457 u32 load_code;
6458 int i, rc = 0;
6459#ifdef BNX2X_STOP_ON_ERROR
6460 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6461 if (unlikely(bp->panic))
6462 return -EPERM;
6463#endif
6464
6465 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6466
6467 bnx2x_set_int_mode(bp);
c14423fe 6468
a2fbb9ea
ET
6469 if (bnx2x_alloc_mem(bp))
6470 return -ENOMEM;
6471
555f6c78 6472 for_each_rx_queue(bp, i)
7a9b2557
VZ
6473 bnx2x_fp(bp, i, disable_tpa) =
6474 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6475
555f6c78 6476 for_each_rx_queue(bp, i)
2dfe0e1f
EG
6477 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6478 bnx2x_poll, 128);
6479
6480#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6481 for_each_rx_queue(bp, i) {
2dfe0e1f
EG
6482 struct bnx2x_fastpath *fp = &bp->fp[i];
6483
6484 fp->poll_no_work = 0;
6485 fp->poll_calls = 0;
6486 fp->poll_max_calls = 0;
6487 fp->poll_complete = 0;
6488 fp->poll_exit = 0;
6489 }
6490#endif
6491 bnx2x_napi_enable(bp);
6492
34f80b04
EG
6493 if (bp->flags & USING_MSIX_FLAG) {
6494 rc = bnx2x_req_msix_irqs(bp);
6495 if (rc) {
6496 pci_disable_msix(bp->pdev);
2dfe0e1f 6497 goto load_error1;
34f80b04
EG
6498 }
6499 } else {
8badd27a
EG
6500 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6501 bnx2x_enable_msi(bp);
34f80b04
EG
6502 bnx2x_ack_int(bp);
6503 rc = bnx2x_req_irq(bp);
6504 if (rc) {
2dfe0e1f 6505 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
6506 if (bp->flags & USING_MSI_FLAG)
6507 pci_disable_msi(bp->pdev);
2dfe0e1f 6508 goto load_error1;
a2fbb9ea 6509 }
8badd27a
EG
6510 if (bp->flags & USING_MSI_FLAG) {
6511 bp->dev->irq = bp->pdev->irq;
6512 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6513 bp->dev->name, bp->pdev->irq);
6514 }
a2fbb9ea
ET
6515 }
6516
2dfe0e1f
EG
6517 /* Send LOAD_REQUEST command to MCP
6518 Returns the type of LOAD command:
6519 if it is the first port to be initialized
6520 common blocks should be initialized, otherwise - not
6521 */
6522 if (!BP_NOMCP(bp)) {
6523 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6524 if (!load_code) {
6525 BNX2X_ERR("MCP response failure, aborting\n");
6526 rc = -EBUSY;
6527 goto load_error2;
6528 }
6529 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6530 rc = -EBUSY; /* other port in diagnostic mode */
6531 goto load_error2;
6532 }
6533
6534 } else {
6535 int port = BP_PORT(bp);
6536
6537 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6538 load_count[0], load_count[1], load_count[2]);
6539 load_count[0]++;
6540 load_count[1 + port]++;
6541 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6542 load_count[0], load_count[1], load_count[2]);
6543 if (load_count[0] == 1)
6544 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6545 else if (load_count[1 + port] == 1)
6546 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6547 else
6548 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6549 }
6550
6551 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6552 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6553 bp->port.pmf = 1;
6554 else
6555 bp->port.pmf = 0;
6556 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6557
a2fbb9ea 6558 /* Initialize HW */
34f80b04
EG
6559 rc = bnx2x_init_hw(bp, load_code);
6560 if (rc) {
a2fbb9ea 6561 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6562 goto load_error2;
a2fbb9ea
ET
6563 }
6564
a2fbb9ea 6565 /* Setup NIC internals and enable interrupts */
471de716 6566 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6567
6568 /* Send LOAD_DONE command to MCP */
34f80b04 6569 if (!BP_NOMCP(bp)) {
228241eb
ET
6570 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6571 if (!load_code) {
da5a662a 6572 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6573 rc = -EBUSY;
2dfe0e1f 6574 goto load_error3;
a2fbb9ea
ET
6575 }
6576 }
6577
6578 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6579
34f80b04
EG
6580 rc = bnx2x_setup_leading(bp);
6581 if (rc) {
da5a662a 6582 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6583 goto load_error3;
34f80b04 6584 }
a2fbb9ea 6585
34f80b04
EG
6586 if (CHIP_IS_E1H(bp))
6587 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6588 BNX2X_ERR("!!! mf_cfg function disabled\n");
6589 bp->state = BNX2X_STATE_DISABLED;
6590 }
a2fbb9ea 6591
34f80b04
EG
6592 if (bp->state == BNX2X_STATE_OPEN)
6593 for_each_nondefault_queue(bp, i) {
6594 rc = bnx2x_setup_multi(bp, i);
6595 if (rc)
2dfe0e1f 6596 goto load_error3;
34f80b04 6597 }
a2fbb9ea 6598
34f80b04 6599 if (CHIP_IS_E1(bp))
3101c2bc 6600 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6601 else
3101c2bc 6602 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6603
6604 if (bp->port.pmf)
6605 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6606
6607 /* Start fast path */
34f80b04
EG
6608 switch (load_mode) {
6609 case LOAD_NORMAL:
6610 /* Tx queue should be only reenabled */
555f6c78 6611 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6612 /* Initialize the receive filter. */
34f80b04
EG
6613 bnx2x_set_rx_mode(bp->dev);
6614 break;
6615
6616 case LOAD_OPEN:
555f6c78 6617 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6618 /* Initialize the receive filter. */
34f80b04 6619 bnx2x_set_rx_mode(bp->dev);
34f80b04 6620 break;
a2fbb9ea 6621
34f80b04 6622 case LOAD_DIAG:
2dfe0e1f 6623 /* Initialize the receive filter. */
a2fbb9ea 6624 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6625 bp->state = BNX2X_STATE_DIAG;
6626 break;
6627
6628 default:
6629 break;
a2fbb9ea
ET
6630 }
6631
34f80b04
EG
6632 if (!bp->port.pmf)
6633 bnx2x__link_status_update(bp);
6634
a2fbb9ea
ET
6635 /* start the timer */
6636 mod_timer(&bp->timer, jiffies + bp->current_interval);
6637
34f80b04 6638
a2fbb9ea
ET
6639 return 0;
6640
2dfe0e1f
EG
6641load_error3:
6642 bnx2x_int_disable_sync(bp, 1);
6643 if (!BP_NOMCP(bp)) {
6644 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6645 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6646 }
6647 bp->port.pmf = 0;
7a9b2557
VZ
6648 /* Free SKBs, SGEs, TPA pool and driver internals */
6649 bnx2x_free_skbs(bp);
555f6c78 6650 for_each_rx_queue(bp, i)
3196a88a 6651 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6652load_error2:
d1014634
YG
6653 /* Release IRQs */
6654 bnx2x_free_irq(bp);
2dfe0e1f
EG
6655load_error1:
6656 bnx2x_napi_disable(bp);
555f6c78 6657 for_each_rx_queue(bp, i)
7cde1c8b 6658 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6659 bnx2x_free_mem(bp);
6660
6661 /* TBD we really need to reset the chip
6662 if we want to recover from this */
34f80b04 6663 return rc;
a2fbb9ea
ET
6664}
6665
6666static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6667{
555f6c78 6668 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
6669 int rc;
6670
c14423fe 6671 /* halt the connection */
555f6c78
EG
6672 fp->state = BNX2X_FP_STATE_HALTING;
6673 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 6674
34f80b04 6675 /* Wait for completion */
a2fbb9ea 6676 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 6677 &(fp->state), 1);
c14423fe 6678 if (rc) /* timeout */
a2fbb9ea
ET
6679 return rc;
6680
6681 /* delete cfc entry */
6682 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6683
34f80b04
EG
6684 /* Wait for completion */
6685 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 6686 &(fp->state), 1);
34f80b04 6687 return rc;
a2fbb9ea
ET
6688}
6689
da5a662a 6690static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6691{
49d66772 6692 u16 dsb_sp_prod_idx;
c14423fe 6693 /* if the other port is handling traffic,
a2fbb9ea 6694 this can take a lot of time */
34f80b04
EG
6695 int cnt = 500;
6696 int rc;
a2fbb9ea
ET
6697
6698 might_sleep();
6699
6700 /* Send HALT ramrod */
6701 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6702 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6703
34f80b04
EG
6704 /* Wait for completion */
6705 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6706 &(bp->fp[0].state), 1);
6707 if (rc) /* timeout */
da5a662a 6708 return rc;
a2fbb9ea 6709
49d66772 6710 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6711
228241eb 6712 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6713 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6714
49d66772 6715 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
6716 we are going to reset the chip anyway
6717 so there is not much to do if this times out
6718 */
34f80b04 6719 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6720 if (!cnt) {
6721 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6722 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6723 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6724#ifdef BNX2X_STOP_ON_ERROR
6725 bnx2x_panic();
da5a662a
VZ
6726#else
6727 rc = -EBUSY;
34f80b04
EG
6728#endif
6729 break;
6730 }
6731 cnt--;
da5a662a 6732 msleep(1);
5650d9d4 6733 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
6734 }
6735 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6736 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6737
6738 return rc;
a2fbb9ea
ET
6739}
6740
34f80b04
EG
6741static void bnx2x_reset_func(struct bnx2x *bp)
6742{
6743 int port = BP_PORT(bp);
6744 int func = BP_FUNC(bp);
6745 int base, i;
6746
6747 /* Configure IGU */
6748 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6749 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6750
34f80b04
EG
6751 /* Clear ILT */
6752 base = FUNC_ILT_BASE(func);
6753 for (i = base; i < base + ILT_PER_FUNC; i++)
6754 bnx2x_ilt_wr(bp, i, 0);
6755}
6756
6757static void bnx2x_reset_port(struct bnx2x *bp)
6758{
6759 int port = BP_PORT(bp);
6760 u32 val;
6761
6762 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6763
6764 /* Do not rcv packets to BRB */
6765 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6766 /* Do not direct rcv packets that are not for MCP to the BRB */
6767 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6768 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6769
6770 /* Configure AEU */
6771 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6772
6773 msleep(100);
6774 /* Check for BRB port occupancy */
6775 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6776 if (val)
6777 DP(NETIF_MSG_IFDOWN,
33471629 6778 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6779
6780 /* TODO: Close Doorbell port? */
6781}
6782
34f80b04
EG
6783static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6784{
6785 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6786 BP_FUNC(bp), reset_code);
6787
6788 switch (reset_code) {
6789 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6790 bnx2x_reset_port(bp);
6791 bnx2x_reset_func(bp);
6792 bnx2x_reset_common(bp);
6793 break;
6794
6795 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6796 bnx2x_reset_port(bp);
6797 bnx2x_reset_func(bp);
6798 break;
6799
6800 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6801 bnx2x_reset_func(bp);
6802 break;
49d66772 6803
34f80b04
EG
6804 default:
6805 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6806 break;
6807 }
6808}
6809
33471629 6810/* must be called with rtnl_lock */
34f80b04 6811static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6812{
da5a662a 6813 int port = BP_PORT(bp);
a2fbb9ea 6814 u32 reset_code = 0;
da5a662a 6815 int i, cnt, rc;
a2fbb9ea
ET
6816
6817 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6818
228241eb
ET
6819 bp->rx_mode = BNX2X_RX_MODE_NONE;
6820 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6821
f8ef6e44 6822 bnx2x_netif_stop(bp, 1);
e94d8af3 6823
34f80b04
EG
6824 del_timer_sync(&bp->timer);
6825 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6826 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6827 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6828
70b9986c
EG
6829 /* Release IRQs */
6830 bnx2x_free_irq(bp);
6831
555f6c78
EG
6832 /* Wait until tx fastpath tasks complete */
6833 for_each_tx_queue(bp, i) {
228241eb
ET
6834 struct bnx2x_fastpath *fp = &bp->fp[i];
6835
34f80b04
EG
6836 cnt = 1000;
6837 smp_rmb();
e8b5fc51 6838 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6839
65abd74d 6840 bnx2x_tx_int(fp, 1000);
34f80b04
EG
6841 if (!cnt) {
6842 BNX2X_ERR("timeout waiting for queue[%d]\n",
6843 i);
6844#ifdef BNX2X_STOP_ON_ERROR
6845 bnx2x_panic();
6846 return -EBUSY;
6847#else
6848 break;
6849#endif
6850 }
6851 cnt--;
da5a662a 6852 msleep(1);
34f80b04
EG
6853 smp_rmb();
6854 }
228241eb 6855 }
da5a662a
VZ
6856 /* Give HW time to discard old tx messages */
6857 msleep(1);
a2fbb9ea 6858
3101c2bc
YG
6859 if (CHIP_IS_E1(bp)) {
6860 struct mac_configuration_cmd *config =
6861 bnx2x_sp(bp, mcast_config);
6862
6863 bnx2x_set_mac_addr_e1(bp, 0);
6864
8d9c5f34 6865 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
6866 CAM_INVALIDATE(config->config_table[i]);
6867
8d9c5f34 6868 config->hdr.length = i;
3101c2bc
YG
6869 if (CHIP_REV_IS_SLOW(bp))
6870 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6871 else
6872 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6873 config->hdr.client_id = BP_CL_ID(bp);
6874 config->hdr.reserved1 = 0;
6875
6876 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6877 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6878 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6879
6880 } else { /* E1H */
65abd74d
YG
6881 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6882
3101c2bc
YG
6883 bnx2x_set_mac_addr_e1h(bp, 0);
6884
6885 for (i = 0; i < MC_HASH_SIZE; i++)
6886 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6887 }
6888
65abd74d
YG
6889 if (unload_mode == UNLOAD_NORMAL)
6890 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6891
6892 else if (bp->flags & NO_WOL_FLAG) {
6893 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6894 if (CHIP_IS_E1H(bp))
6895 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6896
6897 } else if (bp->wol) {
6898 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6899 u8 *mac_addr = bp->dev->dev_addr;
6900 u32 val;
6901 /* The mac address is written to entries 1-4 to
6902 preserve entry 0 which is used by the PMF */
6903 u8 entry = (BP_E1HVN(bp) + 1)*8;
6904
6905 val = (mac_addr[0] << 8) | mac_addr[1];
6906 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6907
6908 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6909 (mac_addr[4] << 8) | mac_addr[5];
6910 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6911
6912 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6913
6914 } else
6915 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6916
34f80b04
EG
6917 /* Close multi and leading connections
6918 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6919 for_each_nondefault_queue(bp, i)
6920 if (bnx2x_stop_multi(bp, i))
228241eb 6921 goto unload_error;
a2fbb9ea 6922
da5a662a
VZ
6923 rc = bnx2x_stop_leading(bp);
6924 if (rc) {
34f80b04 6925 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6926#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6927 return -EBUSY;
da5a662a
VZ
6928#else
6929 goto unload_error;
34f80b04 6930#endif
228241eb
ET
6931 }
6932
6933unload_error:
34f80b04 6934 if (!BP_NOMCP(bp))
228241eb 6935 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6936 else {
6937 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6938 load_count[0], load_count[1], load_count[2]);
6939 load_count[0]--;
da5a662a 6940 load_count[1 + port]--;
34f80b04
EG
6941 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6942 load_count[0], load_count[1], load_count[2]);
6943 if (load_count[0] == 0)
6944 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6945 else if (load_count[1 + port] == 0)
34f80b04
EG
6946 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6947 else
6948 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6949 }
a2fbb9ea 6950
34f80b04
EG
6951 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6952 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6953 bnx2x__link_reset(bp);
a2fbb9ea
ET
6954
6955 /* Reset the chip */
228241eb 6956 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6957
6958 /* Report UNLOAD_DONE to MCP */
34f80b04 6959 if (!BP_NOMCP(bp))
a2fbb9ea 6960 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 6961 bp->port.pmf = 0;
a2fbb9ea 6962
7a9b2557 6963 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 6964 bnx2x_free_skbs(bp);
555f6c78 6965 for_each_rx_queue(bp, i)
3196a88a 6966 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 6967 for_each_rx_queue(bp, i)
7cde1c8b 6968 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6969 bnx2x_free_mem(bp);
6970
6971 bp->state = BNX2X_STATE_CLOSED;
228241eb 6972
a2fbb9ea
ET
6973 netif_carrier_off(bp->dev);
6974
6975 return 0;
6976}
6977
34f80b04
EG
6978static void bnx2x_reset_task(struct work_struct *work)
6979{
6980 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6981
6982#ifdef BNX2X_STOP_ON_ERROR
6983 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6984 " so reset not done to allow debug dump,\n"
6985 KERN_ERR " you will need to reboot when done\n");
6986 return;
6987#endif
6988
6989 rtnl_lock();
6990
6991 if (!netif_running(bp->dev))
6992 goto reset_task_exit;
6993
6994 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6995 bnx2x_nic_load(bp, LOAD_NORMAL);
6996
6997reset_task_exit:
6998 rtnl_unlock();
6999}
7000
a2fbb9ea
ET
7001/* end of nic load/unload */
7002
7003/* ethtool_ops */
7004
7005/*
7006 * Init service functions
7007 */
7008
f1ef27ef
EG
7009static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7010{
7011 switch (func) {
7012 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7013 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7014 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7015 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7016 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7017 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7018 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7019 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7020 default:
7021 BNX2X_ERR("Unsupported function index: %d\n", func);
7022 return (u32)(-1);
7023 }
7024}
7025
7026static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7027{
7028 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7029
7030 /* Flush all outstanding writes */
7031 mmiowb();
7032
7033 /* Pretend to be function 0 */
7034 REG_WR(bp, reg, 0);
7035 /* Flush the GRC transaction (in the chip) */
7036 new_val = REG_RD(bp, reg);
7037 if (new_val != 0) {
7038 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7039 new_val);
7040 BUG();
7041 }
7042
7043 /* From now we are in the "like-E1" mode */
7044 bnx2x_int_disable(bp);
7045
7046 /* Flush all outstanding writes */
7047 mmiowb();
7048
7049 /* Restore the original funtion settings */
7050 REG_WR(bp, reg, orig_func);
7051 new_val = REG_RD(bp, reg);
7052 if (new_val != orig_func) {
7053 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7054 orig_func, new_val);
7055 BUG();
7056 }
7057}
7058
7059static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7060{
7061 if (CHIP_IS_E1H(bp))
7062 bnx2x_undi_int_disable_e1h(bp, func);
7063 else
7064 bnx2x_int_disable(bp);
7065}
7066
34f80b04
EG
7067static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7068{
7069 u32 val;
7070
7071 /* Check if there is any driver already loaded */
7072 val = REG_RD(bp, MISC_REG_UNPREPARED);
7073 if (val == 0x1) {
7074 /* Check if it is the UNDI driver
7075 * UNDI driver initializes CID offset for normal bell to 0x7
7076 */
4a37fb66 7077 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7078 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7079 if (val == 0x7) {
7080 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7081 /* save our func */
34f80b04 7082 int func = BP_FUNC(bp);
da5a662a
VZ
7083 u32 swap_en;
7084 u32 swap_val;
34f80b04 7085
b4661739
EG
7086 /* clear the UNDI indication */
7087 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7088
34f80b04
EG
7089 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7090
7091 /* try unload UNDI on port 0 */
7092 bp->func = 0;
da5a662a
VZ
7093 bp->fw_seq =
7094 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7095 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7096 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7097
7098 /* if UNDI is loaded on the other port */
7099 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7100
da5a662a
VZ
7101 /* send "DONE" for previous unload */
7102 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7103
7104 /* unload UNDI on port 1 */
34f80b04 7105 bp->func = 1;
da5a662a
VZ
7106 bp->fw_seq =
7107 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7108 DRV_MSG_SEQ_NUMBER_MASK);
7109 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7110
7111 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7112 }
7113
b4661739
EG
7114 /* now it's safe to release the lock */
7115 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7116
f1ef27ef 7117 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7118
7119 /* close input traffic and wait for it */
7120 /* Do not rcv packets to BRB */
7121 REG_WR(bp,
7122 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7123 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7124 /* Do not direct rcv packets that are not for MCP to
7125 * the BRB */
7126 REG_WR(bp,
7127 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7128 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7129 /* clear AEU */
7130 REG_WR(bp,
7131 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7132 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7133 msleep(10);
7134
7135 /* save NIG port swap info */
7136 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7137 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7138 /* reset device */
7139 REG_WR(bp,
7140 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7141 0xd3ffffff);
34f80b04
EG
7142 REG_WR(bp,
7143 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7144 0x1403);
da5a662a
VZ
7145 /* take the NIG out of reset and restore swap values */
7146 REG_WR(bp,
7147 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7148 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7149 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7150 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7151
7152 /* send unload done to the MCP */
7153 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7154
7155 /* restore our func and fw_seq */
7156 bp->func = func;
7157 bp->fw_seq =
7158 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7159 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7160
7161 } else
7162 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7163 }
7164}
7165
7166static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7167{
7168 u32 val, val2, val3, val4, id;
72ce58c3 7169 u16 pmc;
34f80b04
EG
7170
7171 /* Get the chip revision id and number. */
7172 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7173 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7174 id = ((val & 0xffff) << 16);
7175 val = REG_RD(bp, MISC_REG_CHIP_REV);
7176 id |= ((val & 0xf) << 12);
7177 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7178 id |= ((val & 0xff) << 4);
5a40e08e 7179 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7180 id |= (val & 0xf);
7181 bp->common.chip_id = id;
7182 bp->link_params.chip_id = bp->common.chip_id;
7183 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7184
7185 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7186 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7187 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7188 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7189 bp->common.flash_size, bp->common.flash_size);
7190
7191 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7192 bp->link_params.shmem_base = bp->common.shmem_base;
7193 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7194
7195 if (!bp->common.shmem_base ||
7196 (bp->common.shmem_base < 0xA0000) ||
7197 (bp->common.shmem_base >= 0xC0000)) {
7198 BNX2X_DEV_INFO("MCP not active\n");
7199 bp->flags |= NO_MCP_FLAG;
7200 return;
7201 }
7202
7203 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7204 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7205 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7206 BNX2X_ERR("BAD MCP validity signature\n");
7207
7208 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7209 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7210
7211 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7212 bp->common.hw_config, bp->common.board);
7213
7214 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7215 SHARED_HW_CFG_LED_MODE_MASK) >>
7216 SHARED_HW_CFG_LED_MODE_SHIFT);
7217
7218 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7219 bp->common.bc_ver = val;
7220 BNX2X_DEV_INFO("bc_ver %X\n", val);
7221 if (val < BNX2X_BC_VER) {
7222 /* for now only warn
7223 * later we might need to enforce this */
7224 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7225 " please upgrade BC\n", BNX2X_BC_VER, val);
7226 }
72ce58c3
EG
7227
7228 if (BP_E1HVN(bp) == 0) {
7229 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7230 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7231 } else {
7232 /* no WOL capability for E1HVN != 0 */
7233 bp->flags |= NO_WOL_FLAG;
7234 }
7235 BNX2X_DEV_INFO("%sWoL capable\n",
7236 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7237
7238 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7239 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7240 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7241 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7242
7243 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7244 val, val2, val3, val4);
7245}
7246
7247static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7248 u32 switch_cfg)
a2fbb9ea 7249{
34f80b04 7250 int port = BP_PORT(bp);
a2fbb9ea
ET
7251 u32 ext_phy_type;
7252
a2fbb9ea
ET
7253 switch (switch_cfg) {
7254 case SWITCH_CFG_1G:
7255 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7256
c18487ee
YR
7257 ext_phy_type =
7258 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7259 switch (ext_phy_type) {
7260 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7261 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7262 ext_phy_type);
7263
34f80b04
EG
7264 bp->port.supported |= (SUPPORTED_10baseT_Half |
7265 SUPPORTED_10baseT_Full |
7266 SUPPORTED_100baseT_Half |
7267 SUPPORTED_100baseT_Full |
7268 SUPPORTED_1000baseT_Full |
7269 SUPPORTED_2500baseX_Full |
7270 SUPPORTED_TP |
7271 SUPPORTED_FIBRE |
7272 SUPPORTED_Autoneg |
7273 SUPPORTED_Pause |
7274 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7275 break;
7276
7277 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7278 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7279 ext_phy_type);
7280
34f80b04
EG
7281 bp->port.supported |= (SUPPORTED_10baseT_Half |
7282 SUPPORTED_10baseT_Full |
7283 SUPPORTED_100baseT_Half |
7284 SUPPORTED_100baseT_Full |
7285 SUPPORTED_1000baseT_Full |
7286 SUPPORTED_TP |
7287 SUPPORTED_FIBRE |
7288 SUPPORTED_Autoneg |
7289 SUPPORTED_Pause |
7290 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7291 break;
7292
7293 default:
7294 BNX2X_ERR("NVRAM config error. "
7295 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7296 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7297 return;
7298 }
7299
34f80b04
EG
7300 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7301 port*0x10);
7302 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7303 break;
7304
7305 case SWITCH_CFG_10G:
7306 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7307
c18487ee
YR
7308 ext_phy_type =
7309 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7310 switch (ext_phy_type) {
7311 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7312 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7313 ext_phy_type);
7314
34f80b04
EG
7315 bp->port.supported |= (SUPPORTED_10baseT_Half |
7316 SUPPORTED_10baseT_Full |
7317 SUPPORTED_100baseT_Half |
7318 SUPPORTED_100baseT_Full |
7319 SUPPORTED_1000baseT_Full |
7320 SUPPORTED_2500baseX_Full |
7321 SUPPORTED_10000baseT_Full |
7322 SUPPORTED_TP |
7323 SUPPORTED_FIBRE |
7324 SUPPORTED_Autoneg |
7325 SUPPORTED_Pause |
7326 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7327 break;
7328
7329 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7330 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7331 ext_phy_type);
f1410647 7332
34f80b04
EG
7333 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7334 SUPPORTED_FIBRE |
7335 SUPPORTED_Pause |
7336 SUPPORTED_Asym_Pause);
f1410647
ET
7337 break;
7338
a2fbb9ea 7339 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7340 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7341 ext_phy_type);
7342
34f80b04
EG
7343 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7344 SUPPORTED_1000baseT_Full |
7345 SUPPORTED_FIBRE |
7346 SUPPORTED_Pause |
7347 SUPPORTED_Asym_Pause);
f1410647
ET
7348 break;
7349
7350 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7351 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7352 ext_phy_type);
7353
34f80b04
EG
7354 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7355 SUPPORTED_1000baseT_Full |
7356 SUPPORTED_FIBRE |
7357 SUPPORTED_Autoneg |
7358 SUPPORTED_Pause |
7359 SUPPORTED_Asym_Pause);
f1410647
ET
7360 break;
7361
c18487ee
YR
7362 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7363 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7364 ext_phy_type);
7365
34f80b04
EG
7366 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7367 SUPPORTED_2500baseX_Full |
7368 SUPPORTED_1000baseT_Full |
7369 SUPPORTED_FIBRE |
7370 SUPPORTED_Autoneg |
7371 SUPPORTED_Pause |
7372 SUPPORTED_Asym_Pause);
c18487ee
YR
7373 break;
7374
f1410647
ET
7375 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7376 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7377 ext_phy_type);
7378
34f80b04
EG
7379 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7380 SUPPORTED_TP |
7381 SUPPORTED_Autoneg |
7382 SUPPORTED_Pause |
7383 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7384 break;
7385
c18487ee
YR
7386 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7387 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7388 bp->link_params.ext_phy_config);
7389 break;
7390
a2fbb9ea
ET
7391 default:
7392 BNX2X_ERR("NVRAM config error. "
7393 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7394 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7395 return;
7396 }
7397
34f80b04
EG
7398 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7399 port*0x18);
7400 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7401
a2fbb9ea
ET
7402 break;
7403
7404 default:
7405 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7406 bp->port.link_config);
a2fbb9ea
ET
7407 return;
7408 }
34f80b04 7409 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7410
7411 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7412 if (!(bp->link_params.speed_cap_mask &
7413 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7414 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7415
c18487ee
YR
7416 if (!(bp->link_params.speed_cap_mask &
7417 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7418 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7419
c18487ee
YR
7420 if (!(bp->link_params.speed_cap_mask &
7421 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7422 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7423
c18487ee
YR
7424 if (!(bp->link_params.speed_cap_mask &
7425 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7426 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7427
c18487ee
YR
7428 if (!(bp->link_params.speed_cap_mask &
7429 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7430 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7431 SUPPORTED_1000baseT_Full);
a2fbb9ea 7432
c18487ee
YR
7433 if (!(bp->link_params.speed_cap_mask &
7434 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7435 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7436
c18487ee
YR
7437 if (!(bp->link_params.speed_cap_mask &
7438 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7439 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7440
34f80b04 7441 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7442}
7443
34f80b04 7444static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7445{
c18487ee 7446 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7447
34f80b04 7448 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7449 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7450 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7451 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7452 bp->port.advertising = bp->port.supported;
a2fbb9ea 7453 } else {
c18487ee
YR
7454 u32 ext_phy_type =
7455 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7456
7457 if ((ext_phy_type ==
7458 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7459 (ext_phy_type ==
7460 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7461 /* force 10G, no AN */
c18487ee 7462 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7463 bp->port.advertising =
a2fbb9ea
ET
7464 (ADVERTISED_10000baseT_Full |
7465 ADVERTISED_FIBRE);
7466 break;
7467 }
7468 BNX2X_ERR("NVRAM config error. "
7469 "Invalid link_config 0x%x"
7470 " Autoneg not supported\n",
34f80b04 7471 bp->port.link_config);
a2fbb9ea
ET
7472 return;
7473 }
7474 break;
7475
7476 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7477 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7478 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7479 bp->port.advertising = (ADVERTISED_10baseT_Full |
7480 ADVERTISED_TP);
a2fbb9ea
ET
7481 } else {
7482 BNX2X_ERR("NVRAM config error. "
7483 "Invalid link_config 0x%x"
7484 " speed_cap_mask 0x%x\n",
34f80b04 7485 bp->port.link_config,
c18487ee 7486 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7487 return;
7488 }
7489 break;
7490
7491 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7492 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7493 bp->link_params.req_line_speed = SPEED_10;
7494 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7495 bp->port.advertising = (ADVERTISED_10baseT_Half |
7496 ADVERTISED_TP);
a2fbb9ea
ET
7497 } else {
7498 BNX2X_ERR("NVRAM config error. "
7499 "Invalid link_config 0x%x"
7500 " speed_cap_mask 0x%x\n",
34f80b04 7501 bp->port.link_config,
c18487ee 7502 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7503 return;
7504 }
7505 break;
7506
7507 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7508 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7509 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7510 bp->port.advertising = (ADVERTISED_100baseT_Full |
7511 ADVERTISED_TP);
a2fbb9ea
ET
7512 } else {
7513 BNX2X_ERR("NVRAM config error. "
7514 "Invalid link_config 0x%x"
7515 " speed_cap_mask 0x%x\n",
34f80b04 7516 bp->port.link_config,
c18487ee 7517 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7518 return;
7519 }
7520 break;
7521
7522 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7523 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7524 bp->link_params.req_line_speed = SPEED_100;
7525 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7526 bp->port.advertising = (ADVERTISED_100baseT_Half |
7527 ADVERTISED_TP);
a2fbb9ea
ET
7528 } else {
7529 BNX2X_ERR("NVRAM config error. "
7530 "Invalid link_config 0x%x"
7531 " speed_cap_mask 0x%x\n",
34f80b04 7532 bp->port.link_config,
c18487ee 7533 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7534 return;
7535 }
7536 break;
7537
7538 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7539 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7540 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7541 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7542 ADVERTISED_TP);
a2fbb9ea
ET
7543 } else {
7544 BNX2X_ERR("NVRAM config error. "
7545 "Invalid link_config 0x%x"
7546 " speed_cap_mask 0x%x\n",
34f80b04 7547 bp->port.link_config,
c18487ee 7548 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7549 return;
7550 }
7551 break;
7552
7553 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7554 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7555 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7556 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7557 ADVERTISED_TP);
a2fbb9ea
ET
7558 } else {
7559 BNX2X_ERR("NVRAM config error. "
7560 "Invalid link_config 0x%x"
7561 " speed_cap_mask 0x%x\n",
34f80b04 7562 bp->port.link_config,
c18487ee 7563 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7564 return;
7565 }
7566 break;
7567
7568 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7569 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7570 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7571 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7572 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7573 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7574 ADVERTISED_FIBRE);
a2fbb9ea
ET
7575 } else {
7576 BNX2X_ERR("NVRAM config error. "
7577 "Invalid link_config 0x%x"
7578 " speed_cap_mask 0x%x\n",
34f80b04 7579 bp->port.link_config,
c18487ee 7580 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7581 return;
7582 }
7583 break;
7584
7585 default:
7586 BNX2X_ERR("NVRAM config error. "
7587 "BAD link speed link_config 0x%x\n",
34f80b04 7588 bp->port.link_config);
c18487ee 7589 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7590 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7591 break;
7592 }
a2fbb9ea 7593
34f80b04
EG
7594 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7595 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7596 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7597 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7598 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7599
c18487ee 7600 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7601 " advertising 0x%x\n",
c18487ee
YR
7602 bp->link_params.req_line_speed,
7603 bp->link_params.req_duplex,
34f80b04 7604 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7605}
7606
34f80b04 7607static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7608{
34f80b04
EG
7609 int port = BP_PORT(bp);
7610 u32 val, val2;
a2fbb9ea 7611
c18487ee 7612 bp->link_params.bp = bp;
34f80b04 7613 bp->link_params.port = port;
c18487ee 7614
c18487ee 7615 bp->link_params.serdes_config =
f1410647 7616 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7617 bp->link_params.lane_config =
a2fbb9ea 7618 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7619 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7620 SHMEM_RD(bp,
7621 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7622 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7623 SHMEM_RD(bp,
7624 dev_info.port_hw_config[port].speed_capability_mask);
7625
34f80b04 7626 bp->port.link_config =
a2fbb9ea
ET
7627 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7628
34f80b04
EG
7629 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7630 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7631 " link_config 0x%08x\n",
c18487ee
YR
7632 bp->link_params.serdes_config,
7633 bp->link_params.lane_config,
7634 bp->link_params.ext_phy_config,
34f80b04 7635 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7636
34f80b04 7637 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7638 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7639 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7640
7641 bnx2x_link_settings_requested(bp);
7642
7643 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7644 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7645 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7646 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7647 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7648 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7649 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7650 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7651 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7652 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7653}
7654
7655static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7656{
7657 int func = BP_FUNC(bp);
7658 u32 val, val2;
7659 int rc = 0;
a2fbb9ea 7660
34f80b04 7661 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7662
34f80b04
EG
7663 bp->e1hov = 0;
7664 bp->e1hmf = 0;
7665 if (CHIP_IS_E1H(bp)) {
7666 bp->mf_config =
7667 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7668
3196a88a
EG
7669 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7670 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7671 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7672
34f80b04
EG
7673 bp->e1hov = val;
7674 bp->e1hmf = 1;
7675 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7676 "(0x%04x)\n",
7677 func, bp->e1hov, bp->e1hov);
7678 } else {
7679 BNX2X_DEV_INFO("Single function mode\n");
7680 if (BP_E1HVN(bp)) {
7681 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7682 " aborting\n", func);
7683 rc = -EPERM;
7684 }
7685 }
7686 }
a2fbb9ea 7687
34f80b04
EG
7688 if (!BP_NOMCP(bp)) {
7689 bnx2x_get_port_hwinfo(bp);
7690
7691 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7692 DRV_MSG_SEQ_NUMBER_MASK);
7693 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7694 }
7695
7696 if (IS_E1HMF(bp)) {
7697 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7698 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7699 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7700 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7701 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7702 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7703 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7704 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7705 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7706 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7707 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7708 ETH_ALEN);
7709 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7710 ETH_ALEN);
a2fbb9ea 7711 }
34f80b04
EG
7712
7713 return rc;
a2fbb9ea
ET
7714 }
7715
34f80b04
EG
7716 if (BP_NOMCP(bp)) {
7717 /* only supposed to happen on emulation/FPGA */
33471629 7718 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
7719 random_ether_addr(bp->dev->dev_addr);
7720 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7721 }
a2fbb9ea 7722
34f80b04
EG
7723 return rc;
7724}
7725
7726static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7727{
7728 int func = BP_FUNC(bp);
7729 int rc;
7730
da5a662a
VZ
7731 /* Disable interrupt handling until HW is initialized */
7732 atomic_set(&bp->intr_sem, 1);
7733
34f80b04 7734 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7735
1cf167f2 7736 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7737 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7738
7739 rc = bnx2x_get_hwinfo(bp);
7740
7741 /* need to reset chip if undi was active */
7742 if (!BP_NOMCP(bp))
7743 bnx2x_undi_unload(bp);
7744
7745 if (CHIP_REV_IS_FPGA(bp))
7746 printk(KERN_ERR PFX "FPGA detected\n");
7747
7748 if (BP_NOMCP(bp) && (func == 0))
7749 printk(KERN_ERR PFX
7750 "MCP disabled, must load devices in order!\n");
7751
555f6c78 7752 /* Set multi queue mode */
8badd27a
EG
7753 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7754 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 7755 printk(KERN_ERR PFX
8badd27a 7756 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
7757 multi_mode = ETH_RSS_MODE_DISABLED;
7758 }
7759 bp->multi_mode = multi_mode;
7760
7761
7a9b2557
VZ
7762 /* Set TPA flags */
7763 if (disable_tpa) {
7764 bp->flags &= ~TPA_ENABLE_FLAG;
7765 bp->dev->features &= ~NETIF_F_LRO;
7766 } else {
7767 bp->flags |= TPA_ENABLE_FLAG;
7768 bp->dev->features |= NETIF_F_LRO;
7769 }
7770
7771
34f80b04
EG
7772 bp->tx_ring_size = MAX_TX_AVAIL;
7773 bp->rx_ring_size = MAX_RX_AVAIL;
7774
7775 bp->rx_csum = 1;
7776 bp->rx_offset = 0;
7777
7778 bp->tx_ticks = 50;
7779 bp->rx_ticks = 25;
7780
34f80b04
EG
7781 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7782 bp->current_interval = (poll ? poll : bp->timer_interval);
7783
7784 init_timer(&bp->timer);
7785 bp->timer.expires = jiffies + bp->current_interval;
7786 bp->timer.data = (unsigned long) bp;
7787 bp->timer.function = bnx2x_timer;
7788
7789 return rc;
a2fbb9ea
ET
7790}
7791
7792/*
7793 * ethtool service functions
7794 */
7795
7796/* All ethtool functions called with rtnl_lock */
7797
7798static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7799{
7800 struct bnx2x *bp = netdev_priv(dev);
7801
34f80b04
EG
7802 cmd->supported = bp->port.supported;
7803 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7804
7805 if (netif_carrier_ok(dev)) {
c18487ee
YR
7806 cmd->speed = bp->link_vars.line_speed;
7807 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7808 } else {
c18487ee
YR
7809 cmd->speed = bp->link_params.req_line_speed;
7810 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7811 }
34f80b04
EG
7812 if (IS_E1HMF(bp)) {
7813 u16 vn_max_rate;
7814
7815 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7816 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7817 if (vn_max_rate < cmd->speed)
7818 cmd->speed = vn_max_rate;
7819 }
a2fbb9ea 7820
c18487ee
YR
7821 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7822 u32 ext_phy_type =
7823 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7824
7825 switch (ext_phy_type) {
7826 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7827 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7828 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7829 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7830 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7831 cmd->port = PORT_FIBRE;
7832 break;
7833
7834 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7835 cmd->port = PORT_TP;
7836 break;
7837
c18487ee
YR
7838 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7839 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7840 bp->link_params.ext_phy_config);
7841 break;
7842
f1410647
ET
7843 default:
7844 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7845 bp->link_params.ext_phy_config);
7846 break;
f1410647
ET
7847 }
7848 } else
a2fbb9ea 7849 cmd->port = PORT_TP;
a2fbb9ea 7850
34f80b04 7851 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
7852 cmd->transceiver = XCVR_INTERNAL;
7853
c18487ee 7854 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 7855 cmd->autoneg = AUTONEG_ENABLE;
f1410647 7856 else
a2fbb9ea 7857 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
7858
7859 cmd->maxtxpkt = 0;
7860 cmd->maxrxpkt = 0;
7861
7862 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7863 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7864 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7865 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7866 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7867 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7868 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7869
7870 return 0;
7871}
7872
7873static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7874{
7875 struct bnx2x *bp = netdev_priv(dev);
7876 u32 advertising;
7877
34f80b04
EG
7878 if (IS_E1HMF(bp))
7879 return 0;
7880
a2fbb9ea
ET
7881 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7882 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7883 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7884 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7885 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7886 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7887 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7888
a2fbb9ea 7889 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
7890 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7891 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 7892 return -EINVAL;
f1410647 7893 }
a2fbb9ea
ET
7894
7895 /* advertise the requested speed and duplex if supported */
34f80b04 7896 cmd->advertising &= bp->port.supported;
a2fbb9ea 7897
c18487ee
YR
7898 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7899 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7900 bp->port.advertising |= (ADVERTISED_Autoneg |
7901 cmd->advertising);
a2fbb9ea
ET
7902
7903 } else { /* forced speed */
7904 /* advertise the requested speed and duplex if supported */
7905 switch (cmd->speed) {
7906 case SPEED_10:
7907 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7908 if (!(bp->port.supported &
f1410647
ET
7909 SUPPORTED_10baseT_Full)) {
7910 DP(NETIF_MSG_LINK,
7911 "10M full not supported\n");
a2fbb9ea 7912 return -EINVAL;
f1410647 7913 }
a2fbb9ea
ET
7914
7915 advertising = (ADVERTISED_10baseT_Full |
7916 ADVERTISED_TP);
7917 } else {
34f80b04 7918 if (!(bp->port.supported &
f1410647
ET
7919 SUPPORTED_10baseT_Half)) {
7920 DP(NETIF_MSG_LINK,
7921 "10M half not supported\n");
a2fbb9ea 7922 return -EINVAL;
f1410647 7923 }
a2fbb9ea
ET
7924
7925 advertising = (ADVERTISED_10baseT_Half |
7926 ADVERTISED_TP);
7927 }
7928 break;
7929
7930 case SPEED_100:
7931 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 7932 if (!(bp->port.supported &
f1410647
ET
7933 SUPPORTED_100baseT_Full)) {
7934 DP(NETIF_MSG_LINK,
7935 "100M full not supported\n");
a2fbb9ea 7936 return -EINVAL;
f1410647 7937 }
a2fbb9ea
ET
7938
7939 advertising = (ADVERTISED_100baseT_Full |
7940 ADVERTISED_TP);
7941 } else {
34f80b04 7942 if (!(bp->port.supported &
f1410647
ET
7943 SUPPORTED_100baseT_Half)) {
7944 DP(NETIF_MSG_LINK,
7945 "100M half not supported\n");
a2fbb9ea 7946 return -EINVAL;
f1410647 7947 }
a2fbb9ea
ET
7948
7949 advertising = (ADVERTISED_100baseT_Half |
7950 ADVERTISED_TP);
7951 }
7952 break;
7953
7954 case SPEED_1000:
f1410647
ET
7955 if (cmd->duplex != DUPLEX_FULL) {
7956 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 7957 return -EINVAL;
f1410647 7958 }
a2fbb9ea 7959
34f80b04 7960 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 7961 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 7962 return -EINVAL;
f1410647 7963 }
a2fbb9ea
ET
7964
7965 advertising = (ADVERTISED_1000baseT_Full |
7966 ADVERTISED_TP);
7967 break;
7968
7969 case SPEED_2500:
f1410647
ET
7970 if (cmd->duplex != DUPLEX_FULL) {
7971 DP(NETIF_MSG_LINK,
7972 "2.5G half not supported\n");
a2fbb9ea 7973 return -EINVAL;
f1410647 7974 }
a2fbb9ea 7975
34f80b04 7976 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
7977 DP(NETIF_MSG_LINK,
7978 "2.5G full not supported\n");
a2fbb9ea 7979 return -EINVAL;
f1410647 7980 }
a2fbb9ea 7981
f1410647 7982 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
7983 ADVERTISED_TP);
7984 break;
7985
7986 case SPEED_10000:
f1410647
ET
7987 if (cmd->duplex != DUPLEX_FULL) {
7988 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 7989 return -EINVAL;
f1410647 7990 }
a2fbb9ea 7991
34f80b04 7992 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 7993 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 7994 return -EINVAL;
f1410647 7995 }
a2fbb9ea
ET
7996
7997 advertising = (ADVERTISED_10000baseT_Full |
7998 ADVERTISED_FIBRE);
7999 break;
8000
8001 default:
f1410647 8002 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8003 return -EINVAL;
8004 }
8005
c18487ee
YR
8006 bp->link_params.req_line_speed = cmd->speed;
8007 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8008 bp->port.advertising = advertising;
a2fbb9ea
ET
8009 }
8010
c18487ee 8011 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8012 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8013 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8014 bp->port.advertising);
a2fbb9ea 8015
34f80b04 8016 if (netif_running(dev)) {
bb2a0f7a 8017 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8018 bnx2x_link_set(bp);
8019 }
a2fbb9ea
ET
8020
8021 return 0;
8022}
8023
c18487ee
YR
8024#define PHY_FW_VER_LEN 10
8025
a2fbb9ea
ET
8026static void bnx2x_get_drvinfo(struct net_device *dev,
8027 struct ethtool_drvinfo *info)
8028{
8029 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8030 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8031
8032 strcpy(info->driver, DRV_MODULE_NAME);
8033 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8034
8035 phy_fw_ver[0] = '\0';
34f80b04 8036 if (bp->port.pmf) {
4a37fb66 8037 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8038 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8039 (bp->state != BNX2X_STATE_CLOSED),
8040 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8041 bnx2x_release_phy_lock(bp);
34f80b04 8042 }
c18487ee 8043
f0e53a84
EG
8044 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8045 (bp->common.bc_ver & 0xff0000) >> 16,
8046 (bp->common.bc_ver & 0xff00) >> 8,
8047 (bp->common.bc_ver & 0xff),
8048 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8049 strcpy(info->bus_info, pci_name(bp->pdev));
8050 info->n_stats = BNX2X_NUM_STATS;
8051 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8052 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8053 info->regdump_len = 0;
8054}
8055
8056static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8057{
8058 struct bnx2x *bp = netdev_priv(dev);
8059
8060 if (bp->flags & NO_WOL_FLAG) {
8061 wol->supported = 0;
8062 wol->wolopts = 0;
8063 } else {
8064 wol->supported = WAKE_MAGIC;
8065 if (bp->wol)
8066 wol->wolopts = WAKE_MAGIC;
8067 else
8068 wol->wolopts = 0;
8069 }
8070 memset(&wol->sopass, 0, sizeof(wol->sopass));
8071}
8072
8073static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8074{
8075 struct bnx2x *bp = netdev_priv(dev);
8076
8077 if (wol->wolopts & ~WAKE_MAGIC)
8078 return -EINVAL;
8079
8080 if (wol->wolopts & WAKE_MAGIC) {
8081 if (bp->flags & NO_WOL_FLAG)
8082 return -EINVAL;
8083
8084 bp->wol = 1;
34f80b04 8085 } else
a2fbb9ea 8086 bp->wol = 0;
34f80b04 8087
a2fbb9ea
ET
8088 return 0;
8089}
8090
8091static u32 bnx2x_get_msglevel(struct net_device *dev)
8092{
8093 struct bnx2x *bp = netdev_priv(dev);
8094
8095 return bp->msglevel;
8096}
8097
8098static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8099{
8100 struct bnx2x *bp = netdev_priv(dev);
8101
8102 if (capable(CAP_NET_ADMIN))
8103 bp->msglevel = level;
8104}
8105
8106static int bnx2x_nway_reset(struct net_device *dev)
8107{
8108 struct bnx2x *bp = netdev_priv(dev);
8109
34f80b04
EG
8110 if (!bp->port.pmf)
8111 return 0;
a2fbb9ea 8112
34f80b04 8113 if (netif_running(dev)) {
bb2a0f7a 8114 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8115 bnx2x_link_set(bp);
8116 }
a2fbb9ea
ET
8117
8118 return 0;
8119}
8120
8121static int bnx2x_get_eeprom_len(struct net_device *dev)
8122{
8123 struct bnx2x *bp = netdev_priv(dev);
8124
34f80b04 8125 return bp->common.flash_size;
a2fbb9ea
ET
8126}
8127
8128static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8129{
34f80b04 8130 int port = BP_PORT(bp);
a2fbb9ea
ET
8131 int count, i;
8132 u32 val = 0;
8133
8134 /* adjust timeout for emulation/FPGA */
8135 count = NVRAM_TIMEOUT_COUNT;
8136 if (CHIP_REV_IS_SLOW(bp))
8137 count *= 100;
8138
8139 /* request access to nvram interface */
8140 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8141 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8142
8143 for (i = 0; i < count*10; i++) {
8144 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8145 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8146 break;
8147
8148 udelay(5);
8149 }
8150
8151 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8152 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8153 return -EBUSY;
8154 }
8155
8156 return 0;
8157}
8158
8159static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8160{
34f80b04 8161 int port = BP_PORT(bp);
a2fbb9ea
ET
8162 int count, i;
8163 u32 val = 0;
8164
8165 /* adjust timeout for emulation/FPGA */
8166 count = NVRAM_TIMEOUT_COUNT;
8167 if (CHIP_REV_IS_SLOW(bp))
8168 count *= 100;
8169
8170 /* relinquish nvram interface */
8171 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8172 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8173
8174 for (i = 0; i < count*10; i++) {
8175 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8176 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8177 break;
8178
8179 udelay(5);
8180 }
8181
8182 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8183 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8184 return -EBUSY;
8185 }
8186
8187 return 0;
8188}
8189
8190static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8191{
8192 u32 val;
8193
8194 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8195
8196 /* enable both bits, even on read */
8197 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8198 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8199 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8200}
8201
8202static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8203{
8204 u32 val;
8205
8206 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8207
8208 /* disable both bits, even after read */
8209 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8210 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8211 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8212}
8213
8214static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8215 u32 cmd_flags)
8216{
f1410647 8217 int count, i, rc;
a2fbb9ea
ET
8218 u32 val;
8219
8220 /* build the command word */
8221 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8222
8223 /* need to clear DONE bit separately */
8224 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8225
8226 /* address of the NVRAM to read from */
8227 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8228 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8229
8230 /* issue a read command */
8231 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8232
8233 /* adjust timeout for emulation/FPGA */
8234 count = NVRAM_TIMEOUT_COUNT;
8235 if (CHIP_REV_IS_SLOW(bp))
8236 count *= 100;
8237
8238 /* wait for completion */
8239 *ret_val = 0;
8240 rc = -EBUSY;
8241 for (i = 0; i < count; i++) {
8242 udelay(5);
8243 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8244
8245 if (val & MCPR_NVM_COMMAND_DONE) {
8246 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8247 /* we read nvram data in cpu order
8248 * but ethtool sees it as an array of bytes
8249 * converting to big-endian will do the work */
8250 val = cpu_to_be32(val);
8251 *ret_val = val;
8252 rc = 0;
8253 break;
8254 }
8255 }
8256
8257 return rc;
8258}
8259
8260static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8261 int buf_size)
8262{
8263 int rc;
8264 u32 cmd_flags;
8265 u32 val;
8266
8267 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8268 DP(BNX2X_MSG_NVM,
c14423fe 8269 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8270 offset, buf_size);
8271 return -EINVAL;
8272 }
8273
34f80b04
EG
8274 if (offset + buf_size > bp->common.flash_size) {
8275 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8276 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8277 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8278 return -EINVAL;
8279 }
8280
8281 /* request access to nvram interface */
8282 rc = bnx2x_acquire_nvram_lock(bp);
8283 if (rc)
8284 return rc;
8285
8286 /* enable access to nvram interface */
8287 bnx2x_enable_nvram_access(bp);
8288
8289 /* read the first word(s) */
8290 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8291 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8292 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8293 memcpy(ret_buf, &val, 4);
8294
8295 /* advance to the next dword */
8296 offset += sizeof(u32);
8297 ret_buf += sizeof(u32);
8298 buf_size -= sizeof(u32);
8299 cmd_flags = 0;
8300 }
8301
8302 if (rc == 0) {
8303 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8304 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8305 memcpy(ret_buf, &val, 4);
8306 }
8307
8308 /* disable access to nvram interface */
8309 bnx2x_disable_nvram_access(bp);
8310 bnx2x_release_nvram_lock(bp);
8311
8312 return rc;
8313}
8314
8315static int bnx2x_get_eeprom(struct net_device *dev,
8316 struct ethtool_eeprom *eeprom, u8 *eebuf)
8317{
8318 struct bnx2x *bp = netdev_priv(dev);
8319 int rc;
8320
2add3acb
EG
8321 if (!netif_running(dev))
8322 return -EAGAIN;
8323
34f80b04 8324 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8325 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8326 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8327 eeprom->len, eeprom->len);
8328
8329 /* parameters already validated in ethtool_get_eeprom */
8330
8331 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8332
8333 return rc;
8334}
8335
8336static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8337 u32 cmd_flags)
8338{
f1410647 8339 int count, i, rc;
a2fbb9ea
ET
8340
8341 /* build the command word */
8342 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8343
8344 /* need to clear DONE bit separately */
8345 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8346
8347 /* write the data */
8348 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8349
8350 /* address of the NVRAM to write to */
8351 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8352 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8353
8354 /* issue the write command */
8355 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8356
8357 /* adjust timeout for emulation/FPGA */
8358 count = NVRAM_TIMEOUT_COUNT;
8359 if (CHIP_REV_IS_SLOW(bp))
8360 count *= 100;
8361
8362 /* wait for completion */
8363 rc = -EBUSY;
8364 for (i = 0; i < count; i++) {
8365 udelay(5);
8366 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8367 if (val & MCPR_NVM_COMMAND_DONE) {
8368 rc = 0;
8369 break;
8370 }
8371 }
8372
8373 return rc;
8374}
8375
f1410647 8376#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8377
8378static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8379 int buf_size)
8380{
8381 int rc;
8382 u32 cmd_flags;
8383 u32 align_offset;
8384 u32 val;
8385
34f80b04
EG
8386 if (offset + buf_size > bp->common.flash_size) {
8387 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8388 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8389 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8390 return -EINVAL;
8391 }
8392
8393 /* request access to nvram interface */
8394 rc = bnx2x_acquire_nvram_lock(bp);
8395 if (rc)
8396 return rc;
8397
8398 /* enable access to nvram interface */
8399 bnx2x_enable_nvram_access(bp);
8400
8401 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8402 align_offset = (offset & ~0x03);
8403 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8404
8405 if (rc == 0) {
8406 val &= ~(0xff << BYTE_OFFSET(offset));
8407 val |= (*data_buf << BYTE_OFFSET(offset));
8408
8409 /* nvram data is returned as an array of bytes
8410 * convert it back to cpu order */
8411 val = be32_to_cpu(val);
8412
a2fbb9ea
ET
8413 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8414 cmd_flags);
8415 }
8416
8417 /* disable access to nvram interface */
8418 bnx2x_disable_nvram_access(bp);
8419 bnx2x_release_nvram_lock(bp);
8420
8421 return rc;
8422}
8423
8424static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8425 int buf_size)
8426{
8427 int rc;
8428 u32 cmd_flags;
8429 u32 val;
8430 u32 written_so_far;
8431
34f80b04 8432 if (buf_size == 1) /* ethtool */
a2fbb9ea 8433 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8434
8435 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8436 DP(BNX2X_MSG_NVM,
c14423fe 8437 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8438 offset, buf_size);
8439 return -EINVAL;
8440 }
8441
34f80b04
EG
8442 if (offset + buf_size > bp->common.flash_size) {
8443 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8444 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8445 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8446 return -EINVAL;
8447 }
8448
8449 /* request access to nvram interface */
8450 rc = bnx2x_acquire_nvram_lock(bp);
8451 if (rc)
8452 return rc;
8453
8454 /* enable access to nvram interface */
8455 bnx2x_enable_nvram_access(bp);
8456
8457 written_so_far = 0;
8458 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8459 while ((written_so_far < buf_size) && (rc == 0)) {
8460 if (written_so_far == (buf_size - sizeof(u32)))
8461 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8462 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8463 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8464 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8465 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8466
8467 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8468
8469 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8470
8471 /* advance to the next dword */
8472 offset += sizeof(u32);
8473 data_buf += sizeof(u32);
8474 written_so_far += sizeof(u32);
8475 cmd_flags = 0;
8476 }
8477
8478 /* disable access to nvram interface */
8479 bnx2x_disable_nvram_access(bp);
8480 bnx2x_release_nvram_lock(bp);
8481
8482 return rc;
8483}
8484
8485static int bnx2x_set_eeprom(struct net_device *dev,
8486 struct ethtool_eeprom *eeprom, u8 *eebuf)
8487{
8488 struct bnx2x *bp = netdev_priv(dev);
8489 int rc;
8490
9f4c9583
EG
8491 if (!netif_running(dev))
8492 return -EAGAIN;
8493
34f80b04 8494 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8495 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8496 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8497 eeprom->len, eeprom->len);
8498
8499 /* parameters already validated in ethtool_set_eeprom */
8500
c18487ee 8501 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8502 if (eeprom->magic == 0x00504859)
8503 if (bp->port.pmf) {
8504
4a37fb66 8505 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8506 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8507 bp->link_params.ext_phy_config,
8508 (bp->state != BNX2X_STATE_CLOSED),
8509 eebuf, eeprom->len);
bb2a0f7a
YG
8510 if ((bp->state == BNX2X_STATE_OPEN) ||
8511 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8512 rc |= bnx2x_link_reset(&bp->link_params,
8513 &bp->link_vars);
8514 rc |= bnx2x_phy_init(&bp->link_params,
8515 &bp->link_vars);
bb2a0f7a 8516 }
4a37fb66 8517 bnx2x_release_phy_lock(bp);
34f80b04
EG
8518
8519 } else /* Only the PMF can access the PHY */
8520 return -EINVAL;
8521 else
c18487ee 8522 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8523
8524 return rc;
8525}
8526
8527static int bnx2x_get_coalesce(struct net_device *dev,
8528 struct ethtool_coalesce *coal)
8529{
8530 struct bnx2x *bp = netdev_priv(dev);
8531
8532 memset(coal, 0, sizeof(struct ethtool_coalesce));
8533
8534 coal->rx_coalesce_usecs = bp->rx_ticks;
8535 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8536
8537 return 0;
8538}
8539
8540static int bnx2x_set_coalesce(struct net_device *dev,
8541 struct ethtool_coalesce *coal)
8542{
8543 struct bnx2x *bp = netdev_priv(dev);
8544
8545 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8546 if (bp->rx_ticks > 3000)
8547 bp->rx_ticks = 3000;
8548
8549 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8550 if (bp->tx_ticks > 0x3000)
8551 bp->tx_ticks = 0x3000;
8552
34f80b04 8553 if (netif_running(dev))
a2fbb9ea
ET
8554 bnx2x_update_coalesce(bp);
8555
8556 return 0;
8557}
8558
8559static void bnx2x_get_ringparam(struct net_device *dev,
8560 struct ethtool_ringparam *ering)
8561{
8562 struct bnx2x *bp = netdev_priv(dev);
8563
8564 ering->rx_max_pending = MAX_RX_AVAIL;
8565 ering->rx_mini_max_pending = 0;
8566 ering->rx_jumbo_max_pending = 0;
8567
8568 ering->rx_pending = bp->rx_ring_size;
8569 ering->rx_mini_pending = 0;
8570 ering->rx_jumbo_pending = 0;
8571
8572 ering->tx_max_pending = MAX_TX_AVAIL;
8573 ering->tx_pending = bp->tx_ring_size;
8574}
8575
8576static int bnx2x_set_ringparam(struct net_device *dev,
8577 struct ethtool_ringparam *ering)
8578{
8579 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8580 int rc = 0;
a2fbb9ea
ET
8581
8582 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8583 (ering->tx_pending > MAX_TX_AVAIL) ||
8584 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8585 return -EINVAL;
8586
8587 bp->rx_ring_size = ering->rx_pending;
8588 bp->tx_ring_size = ering->tx_pending;
8589
34f80b04
EG
8590 if (netif_running(dev)) {
8591 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8592 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8593 }
8594
34f80b04 8595 return rc;
a2fbb9ea
ET
8596}
8597
8598static void bnx2x_get_pauseparam(struct net_device *dev,
8599 struct ethtool_pauseparam *epause)
8600{
8601 struct bnx2x *bp = netdev_priv(dev);
8602
c0700f90 8603 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
8604 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8605
c0700f90
DM
8606 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8607 BNX2X_FLOW_CTRL_RX);
8608 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8609 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
8610
8611 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8612 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8613 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8614}
8615
8616static int bnx2x_set_pauseparam(struct net_device *dev,
8617 struct ethtool_pauseparam *epause)
8618{
8619 struct bnx2x *bp = netdev_priv(dev);
8620
34f80b04
EG
8621 if (IS_E1HMF(bp))
8622 return 0;
8623
a2fbb9ea
ET
8624 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8625 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8626 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8627
c0700f90 8628 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8629
f1410647 8630 if (epause->rx_pause)
c0700f90 8631 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8632
f1410647 8633 if (epause->tx_pause)
c0700f90 8634 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8635
c0700f90
DM
8636 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8637 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8638
c18487ee 8639 if (epause->autoneg) {
34f80b04 8640 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8641 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8642 return -EINVAL;
8643 }
a2fbb9ea 8644
c18487ee 8645 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8646 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8647 }
a2fbb9ea 8648
c18487ee
YR
8649 DP(NETIF_MSG_LINK,
8650 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8651
8652 if (netif_running(dev)) {
bb2a0f7a 8653 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8654 bnx2x_link_set(bp);
8655 }
a2fbb9ea
ET
8656
8657 return 0;
8658}
8659
df0f2343
VZ
8660static int bnx2x_set_flags(struct net_device *dev, u32 data)
8661{
8662 struct bnx2x *bp = netdev_priv(dev);
8663 int changed = 0;
8664 int rc = 0;
8665
8666 /* TPA requires Rx CSUM offloading */
8667 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8668 if (!(dev->features & NETIF_F_LRO)) {
8669 dev->features |= NETIF_F_LRO;
8670 bp->flags |= TPA_ENABLE_FLAG;
8671 changed = 1;
8672 }
8673
8674 } else if (dev->features & NETIF_F_LRO) {
8675 dev->features &= ~NETIF_F_LRO;
8676 bp->flags &= ~TPA_ENABLE_FLAG;
8677 changed = 1;
8678 }
8679
8680 if (changed && netif_running(dev)) {
8681 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8682 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8683 }
8684
8685 return rc;
8686}
8687
a2fbb9ea
ET
8688static u32 bnx2x_get_rx_csum(struct net_device *dev)
8689{
8690 struct bnx2x *bp = netdev_priv(dev);
8691
8692 return bp->rx_csum;
8693}
8694
8695static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8696{
8697 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8698 int rc = 0;
a2fbb9ea
ET
8699
8700 bp->rx_csum = data;
df0f2343
VZ
8701
8702 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8703 TPA'ed packets will be discarded due to wrong TCP CSUM */
8704 if (!data) {
8705 u32 flags = ethtool_op_get_flags(dev);
8706
8707 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8708 }
8709
8710 return rc;
a2fbb9ea
ET
8711}
8712
8713static int bnx2x_set_tso(struct net_device *dev, u32 data)
8714{
755735eb 8715 if (data) {
a2fbb9ea 8716 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8717 dev->features |= NETIF_F_TSO6;
8718 } else {
a2fbb9ea 8719 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
8720 dev->features &= ~NETIF_F_TSO6;
8721 }
8722
a2fbb9ea
ET
8723 return 0;
8724}
8725
f3c87cdd 8726static const struct {
a2fbb9ea
ET
8727 char string[ETH_GSTRING_LEN];
8728} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
8729 { "register_test (offline)" },
8730 { "memory_test (offline)" },
8731 { "loopback_test (offline)" },
8732 { "nvram_test (online)" },
8733 { "interrupt_test (online)" },
8734 { "link_test (online)" },
8735 { "idle check (online)" },
8736 { "MC errors (online)" }
a2fbb9ea
ET
8737};
8738
8739static int bnx2x_self_test_count(struct net_device *dev)
8740{
8741 return BNX2X_NUM_TESTS;
8742}
8743
f3c87cdd
YG
8744static int bnx2x_test_registers(struct bnx2x *bp)
8745{
8746 int idx, i, rc = -ENODEV;
8747 u32 wr_val = 0;
9dabc424 8748 int port = BP_PORT(bp);
f3c87cdd
YG
8749 static const struct {
8750 u32 offset0;
8751 u32 offset1;
8752 u32 mask;
8753 } reg_tbl[] = {
8754/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8755 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8756 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8757 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8758 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8759 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8760 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8761 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8762 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8763 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8764/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8765 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8766 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8767 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8768 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8769 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8770 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8771 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8772 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8773 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8774/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8775 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8776 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8777 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8778 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8779 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8780 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8781 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8782 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8783 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8784/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8785 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8786 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8787 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8788 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8789 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8790 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8791 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8792
8793 { 0xffffffff, 0, 0x00000000 }
8794 };
8795
8796 if (!netif_running(bp->dev))
8797 return rc;
8798
8799 /* Repeat the test twice:
8800 First by writing 0x00000000, second by writing 0xffffffff */
8801 for (idx = 0; idx < 2; idx++) {
8802
8803 switch (idx) {
8804 case 0:
8805 wr_val = 0;
8806 break;
8807 case 1:
8808 wr_val = 0xffffffff;
8809 break;
8810 }
8811
8812 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8813 u32 offset, mask, save_val, val;
f3c87cdd
YG
8814
8815 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8816 mask = reg_tbl[i].mask;
8817
8818 save_val = REG_RD(bp, offset);
8819
8820 REG_WR(bp, offset, wr_val);
8821 val = REG_RD(bp, offset);
8822
8823 /* Restore the original register's value */
8824 REG_WR(bp, offset, save_val);
8825
8826 /* verify that value is as expected value */
8827 if ((val & mask) != (wr_val & mask))
8828 goto test_reg_exit;
8829 }
8830 }
8831
8832 rc = 0;
8833
8834test_reg_exit:
8835 return rc;
8836}
8837
8838static int bnx2x_test_memory(struct bnx2x *bp)
8839{
8840 int i, j, rc = -ENODEV;
8841 u32 val;
8842 static const struct {
8843 u32 offset;
8844 int size;
8845 } mem_tbl[] = {
8846 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8847 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8848 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8849 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8850 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8851 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8852 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8853
8854 { 0xffffffff, 0 }
8855 };
8856 static const struct {
8857 char *name;
8858 u32 offset;
9dabc424
YG
8859 u32 e1_mask;
8860 u32 e1h_mask;
f3c87cdd 8861 } prty_tbl[] = {
9dabc424
YG
8862 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8863 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8864 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8865 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8866 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8867 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8868
8869 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
8870 };
8871
8872 if (!netif_running(bp->dev))
8873 return rc;
8874
8875 /* Go through all the memories */
8876 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8877 for (j = 0; j < mem_tbl[i].size; j++)
8878 REG_RD(bp, mem_tbl[i].offset + j*4);
8879
8880 /* Check the parity status */
8881 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8882 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
8883 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8884 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
8885 DP(NETIF_MSG_HW,
8886 "%s is 0x%x\n", prty_tbl[i].name, val);
8887 goto test_mem_exit;
8888 }
8889 }
8890
8891 rc = 0;
8892
8893test_mem_exit:
8894 return rc;
8895}
8896
f3c87cdd
YG
8897static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8898{
8899 int cnt = 1000;
8900
8901 if (link_up)
8902 while (bnx2x_link_test(bp) && cnt--)
8903 msleep(10);
8904}
8905
8906static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8907{
8908 unsigned int pkt_size, num_pkts, i;
8909 struct sk_buff *skb;
8910 unsigned char *packet;
8911 struct bnx2x_fastpath *fp = &bp->fp[0];
8912 u16 tx_start_idx, tx_idx;
8913 u16 rx_start_idx, rx_idx;
8914 u16 pkt_prod;
8915 struct sw_tx_bd *tx_buf;
8916 struct eth_tx_bd *tx_bd;
8917 dma_addr_t mapping;
8918 union eth_rx_cqe *cqe;
8919 u8 cqe_fp_flags;
8920 struct sw_rx_bd *rx_buf;
8921 u16 len;
8922 int rc = -ENODEV;
8923
8924 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8925 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 8926 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
f3c87cdd
YG
8927
8928 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
3910c8ae 8929 u16 cnt = 1000;
f3c87cdd 8930 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
f3c87cdd 8931 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
f3c87cdd 8932 /* wait until link state is restored */
3910c8ae
EG
8933 if (link_up)
8934 while (cnt-- && bnx2x_test_link(&bp->link_params,
8935 &bp->link_vars))
8936 msleep(10);
f3c87cdd
YG
8937 } else
8938 return -EINVAL;
8939
8940 pkt_size = 1514;
8941 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8942 if (!skb) {
8943 rc = -ENOMEM;
8944 goto test_loopback_exit;
8945 }
8946 packet = skb_put(skb, pkt_size);
8947 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8948 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8949 for (i = ETH_HLEN; i < pkt_size; i++)
8950 packet[i] = (unsigned char) (i & 0xff);
8951
8952 num_pkts = 0;
8953 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8954 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8955
8956 pkt_prod = fp->tx_pkt_prod++;
8957 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8958 tx_buf->first_bd = fp->tx_bd_prod;
8959 tx_buf->skb = skb;
8960
8961 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8962 mapping = pci_map_single(bp->pdev, skb->data,
8963 skb_headlen(skb), PCI_DMA_TODEVICE);
8964 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8965 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8966 tx_bd->nbd = cpu_to_le16(1);
8967 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8968 tx_bd->vlan = cpu_to_le16(pkt_prod);
8969 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8970 ETH_TX_BD_FLAGS_END_BD);
8971 tx_bd->general_data = ((UNICAST_ADDRESS <<
8972 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8973
58f4c4cf
EG
8974 wmb();
8975
f3c87cdd
YG
8976 fp->hw_tx_prods->bds_prod =
8977 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8978 mb(); /* FW restriction: must not reorder writing nbd and packets */
8979 fp->hw_tx_prods->packets_prod =
8980 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8981 DOORBELL(bp, FP_IDX(fp), 0);
8982
8983 mmiowb();
8984
8985 num_pkts++;
8986 fp->tx_bd_prod++;
8987 bp->dev->trans_start = jiffies;
8988
8989 udelay(100);
8990
8991 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8992 if (tx_idx != tx_start_idx + num_pkts)
8993 goto test_loopback_exit;
8994
8995 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8996 if (rx_idx != rx_start_idx + num_pkts)
8997 goto test_loopback_exit;
8998
8999 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9000 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9001 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9002 goto test_loopback_rx_exit;
9003
9004 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9005 if (len != pkt_size)
9006 goto test_loopback_rx_exit;
9007
9008 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9009 skb = rx_buf->skb;
9010 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9011 for (i = ETH_HLEN; i < pkt_size; i++)
9012 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9013 goto test_loopback_rx_exit;
9014
9015 rc = 0;
9016
9017test_loopback_rx_exit:
f3c87cdd
YG
9018
9019 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9020 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9021 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9022 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9023
9024 /* Update producers */
9025 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9026 fp->rx_sge_prod);
f3c87cdd
YG
9027
9028test_loopback_exit:
9029 bp->link_params.loopback_mode = LOOPBACK_NONE;
9030
9031 return rc;
9032}
9033
9034static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9035{
9036 int rc = 0;
9037
9038 if (!netif_running(bp->dev))
9039 return BNX2X_LOOPBACK_FAILED;
9040
f8ef6e44 9041 bnx2x_netif_stop(bp, 1);
3910c8ae 9042 bnx2x_acquire_phy_lock(bp);
f3c87cdd
YG
9043
9044 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9045 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9046 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9047 }
9048
9049 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9050 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9051 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9052 }
9053
3910c8ae 9054 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9055 bnx2x_netif_start(bp);
9056
9057 return rc;
9058}
9059
9060#define CRC32_RESIDUAL 0xdebb20e3
9061
9062static int bnx2x_test_nvram(struct bnx2x *bp)
9063{
9064 static const struct {
9065 int offset;
9066 int size;
9067 } nvram_tbl[] = {
9068 { 0, 0x14 }, /* bootstrap */
9069 { 0x14, 0xec }, /* dir */
9070 { 0x100, 0x350 }, /* manuf_info */
9071 { 0x450, 0xf0 }, /* feature_info */
9072 { 0x640, 0x64 }, /* upgrade_key_info */
9073 { 0x6a4, 0x64 },
9074 { 0x708, 0x70 }, /* manuf_key_info */
9075 { 0x778, 0x70 },
9076 { 0, 0 }
9077 };
9078 u32 buf[0x350 / 4];
9079 u8 *data = (u8 *)buf;
9080 int i, rc;
9081 u32 magic, csum;
9082
9083 rc = bnx2x_nvram_read(bp, 0, data, 4);
9084 if (rc) {
9085 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9086 goto test_nvram_exit;
9087 }
9088
9089 magic = be32_to_cpu(buf[0]);
9090 if (magic != 0x669955aa) {
9091 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9092 rc = -ENODEV;
9093 goto test_nvram_exit;
9094 }
9095
9096 for (i = 0; nvram_tbl[i].size; i++) {
9097
9098 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9099 nvram_tbl[i].size);
9100 if (rc) {
9101 DP(NETIF_MSG_PROBE,
9102 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9103 goto test_nvram_exit;
9104 }
9105
9106 csum = ether_crc_le(nvram_tbl[i].size, data);
9107 if (csum != CRC32_RESIDUAL) {
9108 DP(NETIF_MSG_PROBE,
9109 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9110 rc = -ENODEV;
9111 goto test_nvram_exit;
9112 }
9113 }
9114
9115test_nvram_exit:
9116 return rc;
9117}
9118
9119static int bnx2x_test_intr(struct bnx2x *bp)
9120{
9121 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9122 int i, rc;
9123
9124 if (!netif_running(bp->dev))
9125 return -ENODEV;
9126
8d9c5f34 9127 config->hdr.length = 0;
af246401
EG
9128 if (CHIP_IS_E1(bp))
9129 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9130 else
9131 config->hdr.offset = BP_FUNC(bp);
f3c87cdd
YG
9132 config->hdr.client_id = BP_CL_ID(bp);
9133 config->hdr.reserved1 = 0;
9134
9135 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9136 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9137 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9138 if (rc == 0) {
9139 bp->set_mac_pending++;
9140 for (i = 0; i < 10; i++) {
9141 if (!bp->set_mac_pending)
9142 break;
9143 msleep_interruptible(10);
9144 }
9145 if (i == 10)
9146 rc = -ENODEV;
9147 }
9148
9149 return rc;
9150}
9151
a2fbb9ea
ET
9152static void bnx2x_self_test(struct net_device *dev,
9153 struct ethtool_test *etest, u64 *buf)
9154{
9155 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
9156
9157 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9158
f3c87cdd 9159 if (!netif_running(dev))
a2fbb9ea 9160 return;
a2fbb9ea 9161
33471629 9162 /* offline tests are not supported in MF mode */
f3c87cdd
YG
9163 if (IS_E1HMF(bp))
9164 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9165
9166 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9167 u8 link_up;
9168
9169 link_up = bp->link_vars.link_up;
9170 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9171 bnx2x_nic_load(bp, LOAD_DIAG);
9172 /* wait until link state is restored */
9173 bnx2x_wait_for_link(bp, link_up);
9174
9175 if (bnx2x_test_registers(bp) != 0) {
9176 buf[0] = 1;
9177 etest->flags |= ETH_TEST_FL_FAILED;
9178 }
9179 if (bnx2x_test_memory(bp) != 0) {
9180 buf[1] = 1;
9181 etest->flags |= ETH_TEST_FL_FAILED;
9182 }
9183 buf[2] = bnx2x_test_loopback(bp, link_up);
9184 if (buf[2] != 0)
9185 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9186
f3c87cdd
YG
9187 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9188 bnx2x_nic_load(bp, LOAD_NORMAL);
9189 /* wait until link state is restored */
9190 bnx2x_wait_for_link(bp, link_up);
9191 }
9192 if (bnx2x_test_nvram(bp) != 0) {
9193 buf[3] = 1;
a2fbb9ea
ET
9194 etest->flags |= ETH_TEST_FL_FAILED;
9195 }
f3c87cdd
YG
9196 if (bnx2x_test_intr(bp) != 0) {
9197 buf[4] = 1;
9198 etest->flags |= ETH_TEST_FL_FAILED;
9199 }
9200 if (bp->port.pmf)
9201 if (bnx2x_link_test(bp) != 0) {
9202 buf[5] = 1;
9203 etest->flags |= ETH_TEST_FL_FAILED;
9204 }
9205 buf[7] = bnx2x_mc_assert(bp);
9206 if (buf[7] != 0)
9207 etest->flags |= ETH_TEST_FL_FAILED;
9208
9209#ifdef BNX2X_EXTRA_DEBUG
9210 bnx2x_panic_dump(bp);
9211#endif
a2fbb9ea
ET
9212}
9213
bb2a0f7a
YG
9214static const struct {
9215 long offset;
9216 int size;
9217 u32 flags;
66e855f3
YG
9218#define STATS_FLAGS_PORT 1
9219#define STATS_FLAGS_FUNC 2
9220 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9221} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
66e855f3
YG
9222/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9223 8, STATS_FLAGS_FUNC, "rx_bytes" },
9224 { STATS_OFFSET32(error_bytes_received_hi),
9225 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9226 { STATS_OFFSET32(total_bytes_transmitted_hi),
9227 8, STATS_FLAGS_FUNC, "tx_bytes" },
9228 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9229 8, STATS_FLAGS_PORT, "tx_error_bytes" },
bb2a0f7a 9230 { STATS_OFFSET32(total_unicast_packets_received_hi),
66e855f3 9231 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
bb2a0f7a 9232 { STATS_OFFSET32(total_multicast_packets_received_hi),
66e855f3 9233 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
bb2a0f7a 9234 { STATS_OFFSET32(total_broadcast_packets_received_hi),
66e855f3 9235 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
bb2a0f7a 9236 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
66e855f3 9237 8, STATS_FLAGS_FUNC, "tx_packets" },
bb2a0f7a 9238 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
66e855f3 9239 8, STATS_FLAGS_PORT, "tx_mac_errors" },
bb2a0f7a 9240/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
66e855f3 9241 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9242 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9243 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9244 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9245 8, STATS_FLAGS_PORT, "rx_align_errors" },
bb2a0f7a 9246 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9247 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9248 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9249 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
bb2a0f7a 9250 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9251 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9252 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9253 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9254 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9255 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9256 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9257 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9258 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
66e855f3
YG
9259 8, STATS_FLAGS_PORT, "rx_fragments" },
9260/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9261 8, STATS_FLAGS_PORT, "rx_jabbers" },
bb2a0f7a 9262 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
66e855f3 9263 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
bb2a0f7a 9264 { STATS_OFFSET32(jabber_packets_received),
66e855f3 9265 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
bb2a0f7a 9266 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9267 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9268 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9269 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9270 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9271 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9272 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9273 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9274 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9275 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9276 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9277 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
bb2a0f7a 9278 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9279 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
bb2a0f7a 9280/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
66e855f3 9281 8, STATS_FLAGS_PORT, "rx_xon_frames" },
bb2a0f7a 9282 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
66e855f3
YG
9283 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9284 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9285 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9286 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9287 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
bb2a0f7a 9288 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
66e855f3
YG
9289 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9290 { STATS_OFFSET32(mac_filter_discard),
9291 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9292 { STATS_OFFSET32(no_buff_discard),
9293 4, STATS_FLAGS_FUNC, "rx_discards" },
9294 { STATS_OFFSET32(xxoverflow_discard),
9295 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9296 { STATS_OFFSET32(brb_drop_hi),
9297 8, STATS_FLAGS_PORT, "brb_discard" },
9298 { STATS_OFFSET32(brb_truncate_hi),
9299 8, STATS_FLAGS_PORT, "brb_truncate" },
9300/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9301 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9302 { STATS_OFFSET32(rx_skb_alloc_failed),
9303 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9304/* 42 */{ STATS_OFFSET32(hw_csum_err),
9305 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
a2fbb9ea
ET
9306};
9307
66e855f3
YG
9308#define IS_NOT_E1HMF_STAT(bp, i) \
9309 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9310
a2fbb9ea
ET
9311static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9312{
bb2a0f7a
YG
9313 struct bnx2x *bp = netdev_priv(dev);
9314 int i, j;
9315
a2fbb9ea
ET
9316 switch (stringset) {
9317 case ETH_SS_STATS:
bb2a0f7a 9318 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9319 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9320 continue;
9321 strcpy(buf + j*ETH_GSTRING_LEN,
9322 bnx2x_stats_arr[i].string);
9323 j++;
9324 }
a2fbb9ea
ET
9325 break;
9326
9327 case ETH_SS_TEST:
9328 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9329 break;
9330 }
9331}
9332
9333static int bnx2x_get_stats_count(struct net_device *dev)
9334{
bb2a0f7a
YG
9335 struct bnx2x *bp = netdev_priv(dev);
9336 int i, num_stats = 0;
9337
9338 for (i = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9339 if (IS_NOT_E1HMF_STAT(bp, i))
bb2a0f7a
YG
9340 continue;
9341 num_stats++;
9342 }
9343 return num_stats;
a2fbb9ea
ET
9344}
9345
9346static void bnx2x_get_ethtool_stats(struct net_device *dev,
9347 struct ethtool_stats *stats, u64 *buf)
9348{
9349 struct bnx2x *bp = netdev_priv(dev);
bb2a0f7a
YG
9350 u32 *hw_stats = (u32 *)&bp->eth_stats;
9351 int i, j;
a2fbb9ea 9352
bb2a0f7a 9353 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
66e855f3 9354 if (IS_NOT_E1HMF_STAT(bp, i))
a2fbb9ea 9355 continue;
bb2a0f7a
YG
9356
9357 if (bnx2x_stats_arr[i].size == 0) {
9358 /* skip this counter */
9359 buf[j] = 0;
9360 j++;
a2fbb9ea
ET
9361 continue;
9362 }
bb2a0f7a 9363 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 9364 /* 4-byte counter */
bb2a0f7a
YG
9365 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9366 j++;
a2fbb9ea
ET
9367 continue;
9368 }
9369 /* 8-byte counter */
bb2a0f7a
YG
9370 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9371 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9372 j++;
a2fbb9ea
ET
9373 }
9374}
9375
9376static int bnx2x_phys_id(struct net_device *dev, u32 data)
9377{
9378 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9379 int port = BP_PORT(bp);
a2fbb9ea
ET
9380 int i;
9381
34f80b04
EG
9382 if (!netif_running(dev))
9383 return 0;
9384
9385 if (!bp->port.pmf)
9386 return 0;
9387
a2fbb9ea
ET
9388 if (data == 0)
9389 data = 2;
9390
9391 for (i = 0; i < (data * 2); i++) {
c18487ee 9392 if ((i % 2) == 0)
34f80b04 9393 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9394 bp->link_params.hw_led_mode,
9395 bp->link_params.chip_id);
9396 else
34f80b04 9397 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9398 bp->link_params.hw_led_mode,
9399 bp->link_params.chip_id);
9400
a2fbb9ea
ET
9401 msleep_interruptible(500);
9402 if (signal_pending(current))
9403 break;
9404 }
9405
c18487ee 9406 if (bp->link_vars.link_up)
34f80b04 9407 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9408 bp->link_vars.line_speed,
9409 bp->link_params.hw_led_mode,
9410 bp->link_params.chip_id);
a2fbb9ea
ET
9411
9412 return 0;
9413}
9414
9415static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9416 .get_settings = bnx2x_get_settings,
9417 .set_settings = bnx2x_set_settings,
9418 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9419 .get_wol = bnx2x_get_wol,
9420 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9421 .get_msglevel = bnx2x_get_msglevel,
9422 .set_msglevel = bnx2x_set_msglevel,
9423 .nway_reset = bnx2x_nway_reset,
9424 .get_link = ethtool_op_get_link,
9425 .get_eeprom_len = bnx2x_get_eeprom_len,
9426 .get_eeprom = bnx2x_get_eeprom,
9427 .set_eeprom = bnx2x_set_eeprom,
9428 .get_coalesce = bnx2x_get_coalesce,
9429 .set_coalesce = bnx2x_set_coalesce,
9430 .get_ringparam = bnx2x_get_ringparam,
9431 .set_ringparam = bnx2x_set_ringparam,
9432 .get_pauseparam = bnx2x_get_pauseparam,
9433 .set_pauseparam = bnx2x_set_pauseparam,
9434 .get_rx_csum = bnx2x_get_rx_csum,
9435 .set_rx_csum = bnx2x_set_rx_csum,
9436 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9437 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9438 .set_flags = bnx2x_set_flags,
9439 .get_flags = ethtool_op_get_flags,
9440 .get_sg = ethtool_op_get_sg,
9441 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9442 .get_tso = ethtool_op_get_tso,
9443 .set_tso = bnx2x_set_tso,
9444 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9445 .self_test = bnx2x_self_test,
9446 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9447 .phys_id = bnx2x_phys_id,
9448 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9449 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9450};
9451
9452/* end of ethtool_ops */
9453
9454/****************************************************************************
9455* General service functions
9456****************************************************************************/
9457
9458static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9459{
9460 u16 pmcsr;
9461
9462 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9463
9464 switch (state) {
9465 case PCI_D0:
34f80b04 9466 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9467 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9468 PCI_PM_CTRL_PME_STATUS));
9469
9470 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9471 /* delay required during transition out of D3hot */
a2fbb9ea 9472 msleep(20);
34f80b04 9473 break;
a2fbb9ea 9474
34f80b04
EG
9475 case PCI_D3hot:
9476 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9477 pmcsr |= 3;
a2fbb9ea 9478
34f80b04
EG
9479 if (bp->wol)
9480 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9481
34f80b04
EG
9482 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9483 pmcsr);
a2fbb9ea 9484
34f80b04
EG
9485 /* No more memory access after this point until
9486 * device is brought back to D0.
9487 */
9488 break;
9489
9490 default:
9491 return -EINVAL;
9492 }
9493 return 0;
a2fbb9ea
ET
9494}
9495
237907c1
EG
9496static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9497{
9498 u16 rx_cons_sb;
9499
9500 /* Tell compiler that status block fields can change */
9501 barrier();
9502 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9503 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9504 rx_cons_sb++;
9505 return (fp->rx_comp_cons != rx_cons_sb);
9506}
9507
34f80b04
EG
9508/*
9509 * net_device service functions
9510 */
9511
a2fbb9ea
ET
9512static int bnx2x_poll(struct napi_struct *napi, int budget)
9513{
9514 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9515 napi);
9516 struct bnx2x *bp = fp->bp;
9517 int work_done = 0;
9518
9519#ifdef BNX2X_STOP_ON_ERROR
9520 if (unlikely(bp->panic))
34f80b04 9521 goto poll_panic;
a2fbb9ea
ET
9522#endif
9523
9524 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9525 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9526 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9527
9528 bnx2x_update_fpsb_idx(fp);
9529
237907c1 9530 if (bnx2x_has_tx_work(fp))
a2fbb9ea
ET
9531 bnx2x_tx_int(fp, budget);
9532
237907c1 9533 if (bnx2x_has_rx_work(fp))
a2fbb9ea 9534 work_done = bnx2x_rx_int(fp, budget);
da5a662a 9535 rmb(); /* BNX2X_HAS_WORK() reads the status block */
a2fbb9ea
ET
9536
9537 /* must not complete if we consumed full budget */
da5a662a 9538 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
a2fbb9ea
ET
9539
9540#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9541poll_panic:
a2fbb9ea 9542#endif
288379f0 9543 napi_complete(napi);
a2fbb9ea 9544
34f80b04 9545 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9546 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9547 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
9548 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9549 }
a2fbb9ea
ET
9550 return work_done;
9551}
9552
755735eb
EG
9553
9554/* we split the first BD into headers and data BDs
33471629 9555 * to ease the pain of our fellow microcode engineers
755735eb
EG
9556 * we use one mapping for both BDs
9557 * So far this has only been observed to happen
9558 * in Other Operating Systems(TM)
9559 */
9560static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9561 struct bnx2x_fastpath *fp,
9562 struct eth_tx_bd **tx_bd, u16 hlen,
9563 u16 bd_prod, int nbd)
9564{
9565 struct eth_tx_bd *h_tx_bd = *tx_bd;
9566 struct eth_tx_bd *d_tx_bd;
9567 dma_addr_t mapping;
9568 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9569
9570 /* first fix first BD */
9571 h_tx_bd->nbd = cpu_to_le16(nbd);
9572 h_tx_bd->nbytes = cpu_to_le16(hlen);
9573
9574 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9575 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9576 h_tx_bd->addr_lo, h_tx_bd->nbd);
9577
9578 /* now get a new data BD
9579 * (after the pbd) and fill it */
9580 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9581 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9582
9583 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9584 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9585
9586 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9587 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9588 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9589 d_tx_bd->vlan = 0;
9590 /* this marks the BD as one that has no individual mapping
9591 * the FW ignores this flag in a BD not marked start
9592 */
9593 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9594 DP(NETIF_MSG_TX_QUEUED,
9595 "TSO split data size is %d (%x:%x)\n",
9596 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9597
9598 /* update tx_bd for marking the last BD flag */
9599 *tx_bd = d_tx_bd;
9600
9601 return bd_prod;
9602}
9603
9604static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9605{
9606 if (fix > 0)
9607 csum = (u16) ~csum_fold(csum_sub(csum,
9608 csum_partial(t_header - fix, fix, 0)));
9609
9610 else if (fix < 0)
9611 csum = (u16) ~csum_fold(csum_add(csum,
9612 csum_partial(t_header, -fix, 0)));
9613
9614 return swab16(csum);
9615}
9616
9617static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9618{
9619 u32 rc;
9620
9621 if (skb->ip_summed != CHECKSUM_PARTIAL)
9622 rc = XMIT_PLAIN;
9623
9624 else {
9625 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9626 rc = XMIT_CSUM_V6;
9627 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9628 rc |= XMIT_CSUM_TCP;
9629
9630 } else {
9631 rc = XMIT_CSUM_V4;
9632 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9633 rc |= XMIT_CSUM_TCP;
9634 }
9635 }
9636
9637 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9638 rc |= XMIT_GSO_V4;
9639
9640 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9641 rc |= XMIT_GSO_V6;
9642
9643 return rc;
9644}
9645
632da4d6 9646#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
755735eb
EG
9647/* check if packet requires linearization (packet is too fragmented) */
9648static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9649 u32 xmit_type)
9650{
9651 int to_copy = 0;
9652 int hlen = 0;
9653 int first_bd_sz = 0;
9654
9655 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9656 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9657
9658 if (xmit_type & XMIT_GSO) {
9659 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9660 /* Check if LSO packet needs to be copied:
9661 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9662 int wnd_size = MAX_FETCH_BD - 3;
33471629 9663 /* Number of windows to check */
755735eb
EG
9664 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9665 int wnd_idx = 0;
9666 int frag_idx = 0;
9667 u32 wnd_sum = 0;
9668
9669 /* Headers length */
9670 hlen = (int)(skb_transport_header(skb) - skb->data) +
9671 tcp_hdrlen(skb);
9672
9673 /* Amount of data (w/o headers) on linear part of SKB*/
9674 first_bd_sz = skb_headlen(skb) - hlen;
9675
9676 wnd_sum = first_bd_sz;
9677
9678 /* Calculate the first sum - it's special */
9679 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9680 wnd_sum +=
9681 skb_shinfo(skb)->frags[frag_idx].size;
9682
9683 /* If there was data on linear skb data - check it */
9684 if (first_bd_sz > 0) {
9685 if (unlikely(wnd_sum < lso_mss)) {
9686 to_copy = 1;
9687 goto exit_lbl;
9688 }
9689
9690 wnd_sum -= first_bd_sz;
9691 }
9692
9693 /* Others are easier: run through the frag list and
9694 check all windows */
9695 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9696 wnd_sum +=
9697 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9698
9699 if (unlikely(wnd_sum < lso_mss)) {
9700 to_copy = 1;
9701 break;
9702 }
9703 wnd_sum -=
9704 skb_shinfo(skb)->frags[wnd_idx].size;
9705 }
9706
9707 } else {
9708 /* in non-LSO too fragmented packet should always
9709 be linearized */
9710 to_copy = 1;
9711 }
9712 }
9713
9714exit_lbl:
9715 if (unlikely(to_copy))
9716 DP(NETIF_MSG_TX_QUEUED,
9717 "Linearization IS REQUIRED for %s packet. "
9718 "num_frags %d hlen %d first_bd_sz %d\n",
9719 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9720 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9721
9722 return to_copy;
9723}
632da4d6 9724#endif
755735eb
EG
9725
9726/* called with netif_tx_lock
a2fbb9ea 9727 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9728 * netif_wake_queue()
a2fbb9ea
ET
9729 */
9730static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9731{
9732 struct bnx2x *bp = netdev_priv(dev);
9733 struct bnx2x_fastpath *fp;
555f6c78 9734 struct netdev_queue *txq;
a2fbb9ea
ET
9735 struct sw_tx_bd *tx_buf;
9736 struct eth_tx_bd *tx_bd;
9737 struct eth_tx_parse_bd *pbd = NULL;
9738 u16 pkt_prod, bd_prod;
755735eb 9739 int nbd, fp_index;
a2fbb9ea 9740 dma_addr_t mapping;
755735eb
EG
9741 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9742 int vlan_off = (bp->e1hov ? 4 : 0);
9743 int i;
9744 u8 hlen = 0;
a2fbb9ea
ET
9745
9746#ifdef BNX2X_STOP_ON_ERROR
9747 if (unlikely(bp->panic))
9748 return NETDEV_TX_BUSY;
9749#endif
9750
555f6c78
EG
9751 fp_index = skb_get_queue_mapping(skb);
9752 txq = netdev_get_tx_queue(dev, fp_index);
9753
a2fbb9ea 9754 fp = &bp->fp[fp_index];
755735eb 9755
231fd58a 9756 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
bb2a0f7a 9757 bp->eth_stats.driver_xoff++,
555f6c78 9758 netif_tx_stop_queue(txq);
a2fbb9ea
ET
9759 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9760 return NETDEV_TX_BUSY;
9761 }
9762
755735eb
EG
9763 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9764 " gso type %x xmit_type %x\n",
9765 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9766 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9767
632da4d6 9768#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 9769 /* First, check if we need to linearize the skb
755735eb
EG
9770 (due to FW restrictions) */
9771 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9772 /* Statistics of linearization */
9773 bp->lin_cnt++;
9774 if (skb_linearize(skb) != 0) {
9775 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9776 "silently dropping this SKB\n");
9777 dev_kfree_skb_any(skb);
da5a662a 9778 return NETDEV_TX_OK;
755735eb
EG
9779 }
9780 }
632da4d6 9781#endif
755735eb 9782
a2fbb9ea 9783 /*
755735eb 9784 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 9785 then for TSO or xsum we have a parsing info BD,
755735eb 9786 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
9787 (don't forget to mark the last one as last,
9788 and to unmap only AFTER you write to the BD ...)
755735eb 9789 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
9790 */
9791
9792 pkt_prod = fp->tx_pkt_prod++;
755735eb 9793 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 9794
755735eb 9795 /* get a tx_buf and first BD */
a2fbb9ea
ET
9796 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9797 tx_bd = &fp->tx_desc_ring[bd_prod];
9798
9799 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9800 tx_bd->general_data = (UNICAST_ADDRESS <<
9801 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
9802 /* header nbd */
9803 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 9804
755735eb
EG
9805 /* remember the first BD of the packet */
9806 tx_buf->first_bd = fp->tx_bd_prod;
9807 tx_buf->skb = skb;
a2fbb9ea
ET
9808
9809 DP(NETIF_MSG_TX_QUEUED,
9810 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9811 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9812
0c6671b0
EG
9813#ifdef BCM_VLAN
9814 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9815 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
9816 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9817 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9818 vlan_off += 4;
9819 } else
0c6671b0 9820#endif
755735eb 9821 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 9822
755735eb 9823 if (xmit_type) {
755735eb 9824 /* turn on parsing and get a BD */
a2fbb9ea
ET
9825 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9826 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
9827
9828 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9829 }
9830
9831 if (xmit_type & XMIT_CSUM) {
9832 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
9833
9834 /* for now NS flag is not used in Linux */
755735eb 9835 pbd->global_data = (hlen |
96fc1784 9836 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 9837 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 9838
755735eb
EG
9839 pbd->ip_hlen = (skb_transport_header(skb) -
9840 skb_network_header(skb)) / 2;
9841
9842 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 9843
755735eb
EG
9844 pbd->total_hlen = cpu_to_le16(hlen);
9845 hlen = hlen*2 - vlan_off;
a2fbb9ea 9846
755735eb
EG
9847 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9848
9849 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 9850 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
9851 ETH_TX_BD_FLAGS_IP_CSUM;
9852 else
9853 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9854
9855 if (xmit_type & XMIT_CSUM_TCP) {
9856 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9857
9858 } else {
9859 s8 fix = SKB_CS_OFF(skb); /* signed! */
9860
a2fbb9ea 9861 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 9862 pbd->cs_offset = fix / 2;
a2fbb9ea 9863
755735eb
EG
9864 DP(NETIF_MSG_TX_QUEUED,
9865 "hlen %d offset %d fix %d csum before fix %x\n",
9866 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9867 SKB_CS(skb));
9868
9869 /* HW bug: fixup the CSUM */
9870 pbd->tcp_pseudo_csum =
9871 bnx2x_csum_fix(skb_transport_header(skb),
9872 SKB_CS(skb), fix);
9873
9874 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9875 pbd->tcp_pseudo_csum);
9876 }
a2fbb9ea
ET
9877 }
9878
9879 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 9880 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
9881
9882 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9883 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 9884 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
9885 tx_bd->nbd = cpu_to_le16(nbd);
9886 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9887
9888 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
9889 " nbytes %d flags %x vlan %x\n",
9890 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9891 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9892 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 9893
755735eb 9894 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
9895
9896 DP(NETIF_MSG_TX_QUEUED,
9897 "TSO packet len %d hlen %d total len %d tso size %d\n",
9898 skb->len, hlen, skb_headlen(skb),
9899 skb_shinfo(skb)->gso_size);
9900
9901 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9902
755735eb
EG
9903 if (unlikely(skb_headlen(skb) > hlen))
9904 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9905 bd_prod, ++nbd);
a2fbb9ea
ET
9906
9907 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9908 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
9909 pbd->tcp_flags = pbd_tcp_flags(skb);
9910
9911 if (xmit_type & XMIT_GSO_V4) {
9912 pbd->ip_id = swab16(ip_hdr(skb)->id);
9913 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
9914 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9915 ip_hdr(skb)->daddr,
9916 0, IPPROTO_TCP, 0));
755735eb
EG
9917
9918 } else
9919 pbd->tcp_pseudo_csum =
9920 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9921 &ipv6_hdr(skb)->daddr,
9922 0, IPPROTO_TCP, 0));
9923
a2fbb9ea
ET
9924 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9925 }
9926
755735eb
EG
9927 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9928 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 9929
755735eb
EG
9930 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9931 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 9932
755735eb
EG
9933 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9934 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 9935
755735eb
EG
9936 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9937 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9938 tx_bd->nbytes = cpu_to_le16(frag->size);
9939 tx_bd->vlan = cpu_to_le16(pkt_prod);
9940 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 9941
755735eb
EG
9942 DP(NETIF_MSG_TX_QUEUED,
9943 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9944 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9945 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
9946 }
9947
755735eb 9948 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
9949 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9950
9951 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9952 tx_bd, tx_bd->bd_flags.as_bitfield);
9953
a2fbb9ea
ET
9954 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9955
755735eb 9956 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
9957 * if the packet contains or ends with it
9958 */
9959 if (TX_BD_POFF(bd_prod) < nbd)
9960 nbd++;
9961
9962 if (pbd)
9963 DP(NETIF_MSG_TX_QUEUED,
9964 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9965 " tcp_flags %x xsum %x seq %u hlen %u\n",
9966 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9967 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 9968 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 9969
755735eb 9970 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 9971
58f4c4cf
EG
9972 /*
9973 * Make sure that the BD data is updated before updating the producer
9974 * since FW might read the BD right after the producer is updated.
9975 * This is only applicable for weak-ordered memory model archs such
9976 * as IA-64. The following barrier is also mandatory since FW will
9977 * assumes packets must have BDs.
9978 */
9979 wmb();
9980
96fc1784
ET
9981 fp->hw_tx_prods->bds_prod =
9982 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 9983 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
9984 fp->hw_tx_prods->packets_prod =
9985 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 9986 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
9987
9988 mmiowb();
9989
755735eb 9990 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
9991 dev->trans_start = jiffies;
9992
9993 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
9994 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9995 if we put Tx into XOFF state. */
9996 smp_mb();
555f6c78 9997 netif_tx_stop_queue(txq);
bb2a0f7a 9998 bp->eth_stats.driver_xoff++;
a2fbb9ea 9999 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10000 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10001 }
10002 fp->tx_pkt++;
10003
10004 return NETDEV_TX_OK;
10005}
10006
bb2a0f7a 10007/* called with rtnl_lock */
a2fbb9ea
ET
10008static int bnx2x_open(struct net_device *dev)
10009{
10010 struct bnx2x *bp = netdev_priv(dev);
10011
6eccabb3
EG
10012 netif_carrier_off(dev);
10013
a2fbb9ea
ET
10014 bnx2x_set_power_state(bp, PCI_D0);
10015
bb2a0f7a 10016 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10017}
10018
bb2a0f7a 10019/* called with rtnl_lock */
a2fbb9ea
ET
10020static int bnx2x_close(struct net_device *dev)
10021{
a2fbb9ea
ET
10022 struct bnx2x *bp = netdev_priv(dev);
10023
10024 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10025 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10026 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10027 if (!CHIP_REV_IS_SLOW(bp))
10028 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10029
10030 return 0;
10031}
10032
34f80b04
EG
10033/* called with netif_tx_lock from set_multicast */
10034static void bnx2x_set_rx_mode(struct net_device *dev)
10035{
10036 struct bnx2x *bp = netdev_priv(dev);
10037 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10038 int port = BP_PORT(bp);
10039
10040 if (bp->state != BNX2X_STATE_OPEN) {
10041 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10042 return;
10043 }
10044
10045 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10046
10047 if (dev->flags & IFF_PROMISC)
10048 rx_mode = BNX2X_RX_MODE_PROMISC;
10049
10050 else if ((dev->flags & IFF_ALLMULTI) ||
10051 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10052 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10053
10054 else { /* some multicasts */
10055 if (CHIP_IS_E1(bp)) {
10056 int i, old, offset;
10057 struct dev_mc_list *mclist;
10058 struct mac_configuration_cmd *config =
10059 bnx2x_sp(bp, mcast_config);
10060
10061 for (i = 0, mclist = dev->mc_list;
10062 mclist && (i < dev->mc_count);
10063 i++, mclist = mclist->next) {
10064
10065 config->config_table[i].
10066 cam_entry.msb_mac_addr =
10067 swab16(*(u16 *)&mclist->dmi_addr[0]);
10068 config->config_table[i].
10069 cam_entry.middle_mac_addr =
10070 swab16(*(u16 *)&mclist->dmi_addr[2]);
10071 config->config_table[i].
10072 cam_entry.lsb_mac_addr =
10073 swab16(*(u16 *)&mclist->dmi_addr[4]);
10074 config->config_table[i].cam_entry.flags =
10075 cpu_to_le16(port);
10076 config->config_table[i].
10077 target_table_entry.flags = 0;
10078 config->config_table[i].
10079 target_table_entry.client_id = 0;
10080 config->config_table[i].
10081 target_table_entry.vlan_id = 0;
10082
10083 DP(NETIF_MSG_IFUP,
10084 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10085 config->config_table[i].
10086 cam_entry.msb_mac_addr,
10087 config->config_table[i].
10088 cam_entry.middle_mac_addr,
10089 config->config_table[i].
10090 cam_entry.lsb_mac_addr);
10091 }
8d9c5f34 10092 old = config->hdr.length;
34f80b04
EG
10093 if (old > i) {
10094 for (; i < old; i++) {
10095 if (CAM_IS_INVALID(config->
10096 config_table[i])) {
af246401 10097 /* already invalidated */
34f80b04
EG
10098 break;
10099 }
10100 /* invalidate */
10101 CAM_INVALIDATE(config->
10102 config_table[i]);
10103 }
10104 }
10105
10106 if (CHIP_REV_IS_SLOW(bp))
10107 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10108 else
10109 offset = BNX2X_MAX_MULTICAST*(1 + port);
10110
8d9c5f34 10111 config->hdr.length = i;
34f80b04 10112 config->hdr.offset = offset;
8d9c5f34 10113 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10114 config->hdr.reserved1 = 0;
10115
10116 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10117 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10118 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10119 0);
10120 } else { /* E1H */
10121 /* Accept one or more multicasts */
10122 struct dev_mc_list *mclist;
10123 u32 mc_filter[MC_HASH_SIZE];
10124 u32 crc, bit, regidx;
10125 int i;
10126
10127 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10128
10129 for (i = 0, mclist = dev->mc_list;
10130 mclist && (i < dev->mc_count);
10131 i++, mclist = mclist->next) {
10132
7c510e4b
JB
10133 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10134 mclist->dmi_addr);
34f80b04
EG
10135
10136 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10137 bit = (crc >> 24) & 0xff;
10138 regidx = bit >> 5;
10139 bit &= 0x1f;
10140 mc_filter[regidx] |= (1 << bit);
10141 }
10142
10143 for (i = 0; i < MC_HASH_SIZE; i++)
10144 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10145 mc_filter[i]);
10146 }
10147 }
10148
10149 bp->rx_mode = rx_mode;
10150 bnx2x_set_storm_rx_mode(bp);
10151}
10152
10153/* called with rtnl_lock */
a2fbb9ea
ET
10154static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10155{
10156 struct sockaddr *addr = p;
10157 struct bnx2x *bp = netdev_priv(dev);
10158
34f80b04 10159 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10160 return -EINVAL;
10161
10162 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10163 if (netif_running(dev)) {
10164 if (CHIP_IS_E1(bp))
3101c2bc 10165 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10166 else
3101c2bc 10167 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10168 }
a2fbb9ea
ET
10169
10170 return 0;
10171}
10172
c18487ee 10173/* called with rtnl_lock */
a2fbb9ea
ET
10174static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10175{
10176 struct mii_ioctl_data *data = if_mii(ifr);
10177 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10178 int port = BP_PORT(bp);
a2fbb9ea
ET
10179 int err;
10180
10181 switch (cmd) {
10182 case SIOCGMIIPHY:
34f80b04 10183 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10184
c14423fe 10185 /* fallthrough */
c18487ee 10186
a2fbb9ea 10187 case SIOCGMIIREG: {
c18487ee 10188 u16 mii_regval;
a2fbb9ea 10189
c18487ee
YR
10190 if (!netif_running(dev))
10191 return -EAGAIN;
a2fbb9ea 10192
34f80b04 10193 mutex_lock(&bp->port.phy_mutex);
3196a88a 10194 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10195 DEFAULT_PHY_DEV_ADDR,
10196 (data->reg_num & 0x1f), &mii_regval);
10197 data->val_out = mii_regval;
34f80b04 10198 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10199 return err;
10200 }
10201
10202 case SIOCSMIIREG:
10203 if (!capable(CAP_NET_ADMIN))
10204 return -EPERM;
10205
c18487ee
YR
10206 if (!netif_running(dev))
10207 return -EAGAIN;
10208
34f80b04 10209 mutex_lock(&bp->port.phy_mutex);
3196a88a 10210 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10211 DEFAULT_PHY_DEV_ADDR,
10212 (data->reg_num & 0x1f), data->val_in);
34f80b04 10213 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10214 return err;
10215
10216 default:
10217 /* do nothing */
10218 break;
10219 }
10220
10221 return -EOPNOTSUPP;
10222}
10223
34f80b04 10224/* called with rtnl_lock */
a2fbb9ea
ET
10225static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10226{
10227 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10228 int rc = 0;
a2fbb9ea
ET
10229
10230 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10231 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10232 return -EINVAL;
10233
10234 /* This does not race with packet allocation
c14423fe 10235 * because the actual alloc size is
a2fbb9ea
ET
10236 * only updated as part of load
10237 */
10238 dev->mtu = new_mtu;
10239
10240 if (netif_running(dev)) {
34f80b04
EG
10241 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10242 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10243 }
34f80b04
EG
10244
10245 return rc;
a2fbb9ea
ET
10246}
10247
10248static void bnx2x_tx_timeout(struct net_device *dev)
10249{
10250 struct bnx2x *bp = netdev_priv(dev);
10251
10252#ifdef BNX2X_STOP_ON_ERROR
10253 if (!bp->panic)
10254 bnx2x_panic();
10255#endif
10256 /* This allows the netif to be shutdown gracefully before resetting */
10257 schedule_work(&bp->reset_task);
10258}
10259
10260#ifdef BCM_VLAN
34f80b04 10261/* called with rtnl_lock */
a2fbb9ea
ET
10262static void bnx2x_vlan_rx_register(struct net_device *dev,
10263 struct vlan_group *vlgrp)
10264{
10265 struct bnx2x *bp = netdev_priv(dev);
10266
10267 bp->vlgrp = vlgrp;
0c6671b0
EG
10268
10269 /* Set flags according to the required capabilities */
10270 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10271
10272 if (dev->features & NETIF_F_HW_VLAN_TX)
10273 bp->flags |= HW_VLAN_TX_FLAG;
10274
10275 if (dev->features & NETIF_F_HW_VLAN_RX)
10276 bp->flags |= HW_VLAN_RX_FLAG;
10277
a2fbb9ea 10278 if (netif_running(dev))
49d66772 10279 bnx2x_set_client_config(bp);
a2fbb9ea 10280}
34f80b04 10281
a2fbb9ea
ET
10282#endif
10283
10284#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10285static void poll_bnx2x(struct net_device *dev)
10286{
10287 struct bnx2x *bp = netdev_priv(dev);
10288
10289 disable_irq(bp->pdev->irq);
10290 bnx2x_interrupt(bp->pdev->irq, dev);
10291 enable_irq(bp->pdev->irq);
10292}
10293#endif
10294
c64213cd
SH
10295static const struct net_device_ops bnx2x_netdev_ops = {
10296 .ndo_open = bnx2x_open,
10297 .ndo_stop = bnx2x_close,
10298 .ndo_start_xmit = bnx2x_start_xmit,
10299 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10300 .ndo_set_mac_address = bnx2x_change_mac_addr,
10301 .ndo_validate_addr = eth_validate_addr,
10302 .ndo_do_ioctl = bnx2x_ioctl,
10303 .ndo_change_mtu = bnx2x_change_mtu,
10304 .ndo_tx_timeout = bnx2x_tx_timeout,
10305#ifdef BCM_VLAN
10306 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10307#endif
10308#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10309 .ndo_poll_controller = poll_bnx2x,
10310#endif
10311};
10312
10313
34f80b04
EG
10314static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10315 struct net_device *dev)
a2fbb9ea
ET
10316{
10317 struct bnx2x *bp;
10318 int rc;
10319
10320 SET_NETDEV_DEV(dev, &pdev->dev);
10321 bp = netdev_priv(dev);
10322
34f80b04
EG
10323 bp->dev = dev;
10324 bp->pdev = pdev;
a2fbb9ea 10325 bp->flags = 0;
34f80b04 10326 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10327
10328 rc = pci_enable_device(pdev);
10329 if (rc) {
10330 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10331 goto err_out;
10332 }
10333
10334 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10335 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10336 " aborting\n");
10337 rc = -ENODEV;
10338 goto err_out_disable;
10339 }
10340
10341 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10342 printk(KERN_ERR PFX "Cannot find second PCI device"
10343 " base address, aborting\n");
10344 rc = -ENODEV;
10345 goto err_out_disable;
10346 }
10347
34f80b04
EG
10348 if (atomic_read(&pdev->enable_cnt) == 1) {
10349 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10350 if (rc) {
10351 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10352 " aborting\n");
10353 goto err_out_disable;
10354 }
a2fbb9ea 10355
34f80b04
EG
10356 pci_set_master(pdev);
10357 pci_save_state(pdev);
10358 }
a2fbb9ea
ET
10359
10360 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10361 if (bp->pm_cap == 0) {
10362 printk(KERN_ERR PFX "Cannot find power management"
10363 " capability, aborting\n");
10364 rc = -EIO;
10365 goto err_out_release;
10366 }
10367
10368 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10369 if (bp->pcie_cap == 0) {
10370 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10371 " aborting\n");
10372 rc = -EIO;
10373 goto err_out_release;
10374 }
10375
10376 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10377 bp->flags |= USING_DAC_FLAG;
10378 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10379 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10380 " failed, aborting\n");
10381 rc = -EIO;
10382 goto err_out_release;
10383 }
10384
10385 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10386 printk(KERN_ERR PFX "System does not support DMA,"
10387 " aborting\n");
10388 rc = -EIO;
10389 goto err_out_release;
10390 }
10391
34f80b04
EG
10392 dev->mem_start = pci_resource_start(pdev, 0);
10393 dev->base_addr = dev->mem_start;
10394 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10395
10396 dev->irq = pdev->irq;
10397
275f165f 10398 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10399 if (!bp->regview) {
10400 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10401 rc = -ENOMEM;
10402 goto err_out_release;
10403 }
10404
34f80b04
EG
10405 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10406 min_t(u64, BNX2X_DB_SIZE,
10407 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10408 if (!bp->doorbells) {
10409 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10410 rc = -ENOMEM;
10411 goto err_out_unmap;
10412 }
10413
10414 bnx2x_set_power_state(bp, PCI_D0);
10415
34f80b04
EG
10416 /* clean indirect addresses */
10417 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10418 PCICFG_VENDOR_ID_OFFSET);
10419 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10420 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10421 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10422 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10423
34f80b04 10424 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10425
c64213cd 10426 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10427 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10428 dev->features |= NETIF_F_SG;
10429 dev->features |= NETIF_F_HW_CSUM;
10430 if (bp->flags & USING_DAC_FLAG)
10431 dev->features |= NETIF_F_HIGHDMA;
10432#ifdef BCM_VLAN
10433 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10434 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10435#endif
10436 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10437 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10438
10439 return 0;
10440
10441err_out_unmap:
10442 if (bp->regview) {
10443 iounmap(bp->regview);
10444 bp->regview = NULL;
10445 }
a2fbb9ea
ET
10446 if (bp->doorbells) {
10447 iounmap(bp->doorbells);
10448 bp->doorbells = NULL;
10449 }
10450
10451err_out_release:
34f80b04
EG
10452 if (atomic_read(&pdev->enable_cnt) == 1)
10453 pci_release_regions(pdev);
a2fbb9ea
ET
10454
10455err_out_disable:
10456 pci_disable_device(pdev);
10457 pci_set_drvdata(pdev, NULL);
10458
10459err_out:
10460 return rc;
10461}
10462
25047950
ET
10463static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10464{
10465 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10466
10467 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10468 return val;
10469}
10470
10471/* return value of 1=2.5GHz 2=5GHz */
10472static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10473{
10474 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10475
10476 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10477 return val;
10478}
10479
a2fbb9ea
ET
10480static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10481 const struct pci_device_id *ent)
10482{
10483 static int version_printed;
10484 struct net_device *dev = NULL;
10485 struct bnx2x *bp;
25047950 10486 int rc;
a2fbb9ea
ET
10487
10488 if (version_printed++ == 0)
10489 printk(KERN_INFO "%s", version);
10490
10491 /* dev zeroed in init_etherdev */
555f6c78 10492 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
10493 if (!dev) {
10494 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10495 return -ENOMEM;
34f80b04 10496 }
a2fbb9ea 10497
a2fbb9ea
ET
10498 bp = netdev_priv(dev);
10499 bp->msglevel = debug;
10500
34f80b04 10501 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10502 if (rc < 0) {
10503 free_netdev(dev);
10504 return rc;
10505 }
10506
a2fbb9ea
ET
10507 pci_set_drvdata(pdev, dev);
10508
34f80b04 10509 rc = bnx2x_init_bp(bp);
693fc0d1
EG
10510 if (rc)
10511 goto init_one_exit;
10512
10513 rc = register_netdev(dev);
34f80b04 10514 if (rc) {
693fc0d1 10515 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
10516 goto init_one_exit;
10517 }
10518
10519 bp->common.name = board_info[ent->driver_data].name;
25047950 10520 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10521 " IRQ %d, ", dev->name, bp->common.name,
10522 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10523 bnx2x_get_pcie_width(bp),
10524 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10525 dev->base_addr, bp->pdev->irq);
e174961c 10526 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10527 return 0;
34f80b04
EG
10528
10529init_one_exit:
10530 if (bp->regview)
10531 iounmap(bp->regview);
10532
10533 if (bp->doorbells)
10534 iounmap(bp->doorbells);
10535
10536 free_netdev(dev);
10537
10538 if (atomic_read(&pdev->enable_cnt) == 1)
10539 pci_release_regions(pdev);
10540
10541 pci_disable_device(pdev);
10542 pci_set_drvdata(pdev, NULL);
10543
10544 return rc;
a2fbb9ea
ET
10545}
10546
10547static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10548{
10549 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10550 struct bnx2x *bp;
10551
10552 if (!dev) {
228241eb
ET
10553 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10554 return;
10555 }
228241eb 10556 bp = netdev_priv(dev);
a2fbb9ea 10557
a2fbb9ea
ET
10558 unregister_netdev(dev);
10559
10560 if (bp->regview)
10561 iounmap(bp->regview);
10562
10563 if (bp->doorbells)
10564 iounmap(bp->doorbells);
10565
10566 free_netdev(dev);
34f80b04
EG
10567
10568 if (atomic_read(&pdev->enable_cnt) == 1)
10569 pci_release_regions(pdev);
10570
a2fbb9ea
ET
10571 pci_disable_device(pdev);
10572 pci_set_drvdata(pdev, NULL);
10573}
10574
10575static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10576{
10577 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10578 struct bnx2x *bp;
10579
34f80b04
EG
10580 if (!dev) {
10581 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10582 return -ENODEV;
10583 }
10584 bp = netdev_priv(dev);
a2fbb9ea 10585
34f80b04 10586 rtnl_lock();
a2fbb9ea 10587
34f80b04 10588 pci_save_state(pdev);
228241eb 10589
34f80b04
EG
10590 if (!netif_running(dev)) {
10591 rtnl_unlock();
10592 return 0;
10593 }
a2fbb9ea
ET
10594
10595 netif_device_detach(dev);
a2fbb9ea 10596
da5a662a 10597 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10598
a2fbb9ea 10599 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10600
34f80b04
EG
10601 rtnl_unlock();
10602
a2fbb9ea
ET
10603 return 0;
10604}
10605
10606static int bnx2x_resume(struct pci_dev *pdev)
10607{
10608 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10609 struct bnx2x *bp;
a2fbb9ea
ET
10610 int rc;
10611
228241eb
ET
10612 if (!dev) {
10613 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10614 return -ENODEV;
10615 }
228241eb 10616 bp = netdev_priv(dev);
a2fbb9ea 10617
34f80b04
EG
10618 rtnl_lock();
10619
228241eb 10620 pci_restore_state(pdev);
34f80b04
EG
10621
10622 if (!netif_running(dev)) {
10623 rtnl_unlock();
10624 return 0;
10625 }
10626
a2fbb9ea
ET
10627 bnx2x_set_power_state(bp, PCI_D0);
10628 netif_device_attach(dev);
10629
da5a662a 10630 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10631
34f80b04
EG
10632 rtnl_unlock();
10633
10634 return rc;
a2fbb9ea
ET
10635}
10636
f8ef6e44
YG
10637static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10638{
10639 int i;
10640
10641 bp->state = BNX2X_STATE_ERROR;
10642
10643 bp->rx_mode = BNX2X_RX_MODE_NONE;
10644
10645 bnx2x_netif_stop(bp, 0);
10646
10647 del_timer_sync(&bp->timer);
10648 bp->stats_state = STATS_STATE_DISABLED;
10649 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10650
10651 /* Release IRQs */
10652 bnx2x_free_irq(bp);
10653
10654 if (CHIP_IS_E1(bp)) {
10655 struct mac_configuration_cmd *config =
10656 bnx2x_sp(bp, mcast_config);
10657
8d9c5f34 10658 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
10659 CAM_INVALIDATE(config->config_table[i]);
10660 }
10661
10662 /* Free SKBs, SGEs, TPA pool and driver internals */
10663 bnx2x_free_skbs(bp);
555f6c78 10664 for_each_rx_queue(bp, i)
f8ef6e44 10665 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 10666 for_each_rx_queue(bp, i)
7cde1c8b 10667 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
10668 bnx2x_free_mem(bp);
10669
10670 bp->state = BNX2X_STATE_CLOSED;
10671
10672 netif_carrier_off(bp->dev);
10673
10674 return 0;
10675}
10676
10677static void bnx2x_eeh_recover(struct bnx2x *bp)
10678{
10679 u32 val;
10680
10681 mutex_init(&bp->port.phy_mutex);
10682
10683 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10684 bp->link_params.shmem_base = bp->common.shmem_base;
10685 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10686
10687 if (!bp->common.shmem_base ||
10688 (bp->common.shmem_base < 0xA0000) ||
10689 (bp->common.shmem_base >= 0xC0000)) {
10690 BNX2X_DEV_INFO("MCP not active\n");
10691 bp->flags |= NO_MCP_FLAG;
10692 return;
10693 }
10694
10695 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
10696 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10697 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
10698 BNX2X_ERR("BAD MCP validity signature\n");
10699
10700 if (!BP_NOMCP(bp)) {
10701 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
10702 & DRV_MSG_SEQ_NUMBER_MASK);
10703 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
10704 }
10705}
10706
493adb1f
WX
10707/**
10708 * bnx2x_io_error_detected - called when PCI error is detected
10709 * @pdev: Pointer to PCI device
10710 * @state: The current pci connection state
10711 *
10712 * This function is called after a PCI bus error affecting
10713 * this device has been detected.
10714 */
10715static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
10716 pci_channel_state_t state)
10717{
10718 struct net_device *dev = pci_get_drvdata(pdev);
10719 struct bnx2x *bp = netdev_priv(dev);
10720
10721 rtnl_lock();
10722
10723 netif_device_detach(dev);
10724
10725 if (netif_running(dev))
f8ef6e44 10726 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
10727
10728 pci_disable_device(pdev);
10729
10730 rtnl_unlock();
10731
10732 /* Request a slot reset */
10733 return PCI_ERS_RESULT_NEED_RESET;
10734}
10735
10736/**
10737 * bnx2x_io_slot_reset - called after the PCI bus has been reset
10738 * @pdev: Pointer to PCI device
10739 *
10740 * Restart the card from scratch, as if from a cold-boot.
10741 */
10742static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
10743{
10744 struct net_device *dev = pci_get_drvdata(pdev);
10745 struct bnx2x *bp = netdev_priv(dev);
10746
10747 rtnl_lock();
10748
10749 if (pci_enable_device(pdev)) {
10750 dev_err(&pdev->dev,
10751 "Cannot re-enable PCI device after reset\n");
10752 rtnl_unlock();
10753 return PCI_ERS_RESULT_DISCONNECT;
10754 }
10755
10756 pci_set_master(pdev);
10757 pci_restore_state(pdev);
10758
10759 if (netif_running(dev))
10760 bnx2x_set_power_state(bp, PCI_D0);
10761
10762 rtnl_unlock();
10763
10764 return PCI_ERS_RESULT_RECOVERED;
10765}
10766
10767/**
10768 * bnx2x_io_resume - called when traffic can start flowing again
10769 * @pdev: Pointer to PCI device
10770 *
10771 * This callback is called when the error recovery driver tells us that
10772 * its OK to resume normal operation.
10773 */
10774static void bnx2x_io_resume(struct pci_dev *pdev)
10775{
10776 struct net_device *dev = pci_get_drvdata(pdev);
10777 struct bnx2x *bp = netdev_priv(dev);
10778
10779 rtnl_lock();
10780
f8ef6e44
YG
10781 bnx2x_eeh_recover(bp);
10782
493adb1f 10783 if (netif_running(dev))
f8ef6e44 10784 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
10785
10786 netif_device_attach(dev);
10787
10788 rtnl_unlock();
10789}
10790
10791static struct pci_error_handlers bnx2x_err_handler = {
10792 .error_detected = bnx2x_io_error_detected,
10793 .slot_reset = bnx2x_io_slot_reset,
10794 .resume = bnx2x_io_resume,
10795};
10796
a2fbb9ea 10797static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10798 .name = DRV_MODULE_NAME,
10799 .id_table = bnx2x_pci_tbl,
10800 .probe = bnx2x_init_one,
10801 .remove = __devexit_p(bnx2x_remove_one),
10802 .suspend = bnx2x_suspend,
10803 .resume = bnx2x_resume,
10804 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10805};
10806
10807static int __init bnx2x_init(void)
10808{
1cf167f2
EG
10809 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10810 if (bnx2x_wq == NULL) {
10811 printk(KERN_ERR PFX "Cannot create workqueue\n");
10812 return -ENOMEM;
10813 }
10814
a2fbb9ea
ET
10815 return pci_register_driver(&bnx2x_pci_driver);
10816}
10817
10818static void __exit bnx2x_cleanup(void)
10819{
10820 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
10821
10822 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
10823}
10824
10825module_init(bnx2x_init);
10826module_exit(bnx2x_cleanup);
10827
This page took 1.299906 seconds and 5 git commands to generate.