bnx2x: Reporting host statistics to management FW
[deliverable/linux.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea
ET
51#include <linux/io.h>
52
359d8b15 53
a2fbb9ea
ET
54#include "bnx2x.h"
55#include "bnx2x_init.h"
94a78b79 56#include "bnx2x_init_ops.h"
0a64ea57 57#include "bnx2x_dump.h"
a2fbb9ea 58
573dd788
EG
59#define DRV_MODULE_VERSION "1.48.114-1"
60#define DRV_MODULE_RELDATE "2009/07/29"
34f80b04 61#define BNX2X_BC_VER 0x040200
a2fbb9ea 62
94a78b79
VZ
63#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
68
34f80b04
EG
69/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
a2fbb9ea 71
53a10565 72static char version[] __devinitdata =
34f80b04 73 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
74 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
24e3fcef 76MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
78MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 80
555f6c78
EG
81static int multi_mode = 1;
82module_param(multi_mode, int, 0);
ca00392c
EG
83MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
85
86static int num_rx_queues;
87module_param(num_rx_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
555f6c78 95
19680c48 96static int disable_tpa;
19680c48 97module_param(disable_tpa, int, 0);
9898f86d 98MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a
EG
99
100static int int_mode;
101module_param(int_mode, int, 0);
102MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
9898f86d 104static int poll;
a2fbb9ea 105module_param(poll, int, 0);
9898f86d 106MODULE_PARM_DESC(poll, " Use polling (for debug)");
8d5726c4
EG
107
108static int mrrs = -1;
109module_param(mrrs, int, 0);
110MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
111
9898f86d 112static int debug;
a2fbb9ea 113module_param(debug, int, 0);
9898f86d
EG
114MODULE_PARM_DESC(debug, " Default debug msglevel");
115
116static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 117
1cf167f2 118static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
119
120enum bnx2x_board_type {
121 BCM57710 = 0,
34f80b04
EG
122 BCM57711 = 1,
123 BCM57711E = 2,
a2fbb9ea
ET
124};
125
34f80b04 126/* indexed by board_type, above */
53a10565 127static struct {
a2fbb9ea
ET
128 char *name;
129} board_info[] __devinitdata = {
34f80b04
EG
130 { "Broadcom NetXtreme II BCM57710 XGb" },
131 { "Broadcom NetXtreme II BCM57711 XGb" },
132 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
133};
134
34f80b04 135
a2fbb9ea
ET
136static const struct pci_device_id bnx2x_pci_tbl[] = {
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
141 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
143 { 0 }
144};
145
146MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148/****************************************************************************
149* General service functions
150****************************************************************************/
151
152/* used only at init
153 * locking is done by mcp
154 */
155static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
156{
157 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
158 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
159 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
160 PCICFG_VENDOR_ID_OFFSET);
161}
162
a2fbb9ea
ET
163static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
164{
165 u32 val;
166
167 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
168 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
169 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
170 PCICFG_VENDOR_ID_OFFSET);
171
172 return val;
173}
a2fbb9ea
ET
174
175static const u32 dmae_reg_go_c[] = {
176 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
177 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
178 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
179 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
180};
181
182/* copy command into DMAE command memory and set DMAE command go */
183static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
184 int idx)
185{
186 u32 cmd_offset;
187 int i;
188
189 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
190 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
191 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192
ad8d3948
EG
193 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
194 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
195 }
196 REG_WR(bp, dmae_reg_go_c[idx], 1);
197}
198
ad8d3948
EG
199void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
200 u32 len32)
a2fbb9ea 201{
ad8d3948 202 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 203 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
204 int cnt = 200;
205
206 if (!bp->dmae_ready) {
207 u32 *data = bnx2x_sp(bp, wb_data[0]);
208
209 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
210 " using indirect\n", dst_addr, len32);
211 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
212 return;
213 }
214
215 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
216
217 memset(dmae, 0, sizeof(struct dmae_command));
218
219 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
220 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
221 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
222#ifdef __BIG_ENDIAN
223 DMAE_CMD_ENDIANITY_B_DW_SWAP |
224#else
225 DMAE_CMD_ENDIANITY_DW_SWAP |
226#endif
34f80b04
EG
227 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
228 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
229 dmae->src_addr_lo = U64_LO(dma_addr);
230 dmae->src_addr_hi = U64_HI(dma_addr);
231 dmae->dst_addr_lo = dst_addr >> 2;
232 dmae->dst_addr_hi = 0;
233 dmae->len = len32;
234 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
235 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 236 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 237
c3eefaf6 238 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
239 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
240 "dst_addr [%x:%08x (%08x)]\n"
241 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
242 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
243 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
244 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 245 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
246 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
247 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
248
249 *wb_comp = 0;
250
34f80b04 251 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
252
253 udelay(5);
ad8d3948
EG
254
255 while (*wb_comp != DMAE_COMP_VAL) {
256 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
257
ad8d3948 258 if (!cnt) {
c3eefaf6 259 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
260 break;
261 }
ad8d3948 262 cnt--;
12469401
YG
263 /* adjust delay for emulation/FPGA */
264 if (CHIP_REV_IS_SLOW(bp))
265 msleep(100);
266 else
267 udelay(5);
a2fbb9ea 268 }
ad8d3948
EG
269
270 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
271}
272
c18487ee 273void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 274{
ad8d3948 275 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 276 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
277 int cnt = 200;
278
279 if (!bp->dmae_ready) {
280 u32 *data = bnx2x_sp(bp, wb_data[0]);
281 int i;
282
283 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
284 " using indirect\n", src_addr, len32);
285 for (i = 0; i < len32; i++)
286 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
287 return;
288 }
289
290 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
291
292 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
293 memset(dmae, 0, sizeof(struct dmae_command));
294
295 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
296 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
297 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
298#ifdef __BIG_ENDIAN
299 DMAE_CMD_ENDIANITY_B_DW_SWAP |
300#else
301 DMAE_CMD_ENDIANITY_DW_SWAP |
302#endif
34f80b04
EG
303 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
304 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
305 dmae->src_addr_lo = src_addr >> 2;
306 dmae->src_addr_hi = 0;
307 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
308 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
309 dmae->len = len32;
310 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
311 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 312 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 313
c3eefaf6 314 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
a2fbb9ea
ET
315 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
316 "dst_addr [%x:%08x (%08x)]\n"
317 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
318 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
319 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
320 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
321
322 *wb_comp = 0;
323
34f80b04 324 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
325
326 udelay(5);
ad8d3948
EG
327
328 while (*wb_comp != DMAE_COMP_VAL) {
329
ad8d3948 330 if (!cnt) {
c3eefaf6 331 BNX2X_ERR("DMAE timeout!\n");
a2fbb9ea
ET
332 break;
333 }
ad8d3948 334 cnt--;
12469401
YG
335 /* adjust delay for emulation/FPGA */
336 if (CHIP_REV_IS_SLOW(bp))
337 msleep(100);
338 else
339 udelay(5);
a2fbb9ea 340 }
ad8d3948 341 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
342 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
343 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
344
345 mutex_unlock(&bp->dmae_mutex);
346}
347
348/* used only for slowpath so not inlined */
349static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
350{
351 u32 wb_write[2];
352
353 wb_write[0] = val_hi;
354 wb_write[1] = val_lo;
355 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 356}
a2fbb9ea 357
ad8d3948
EG
358#ifdef USE_WB_RD
359static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
360{
361 u32 wb_data[2];
362
363 REG_RD_DMAE(bp, reg, wb_data, 2);
364
365 return HILO_U64(wb_data[0], wb_data[1]);
366}
367#endif
368
a2fbb9ea
ET
369static int bnx2x_mc_assert(struct bnx2x *bp)
370{
a2fbb9ea 371 char last_idx;
34f80b04
EG
372 int i, rc = 0;
373 u32 row0, row1, row2, row3;
374
375 /* XSTORM */
376 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
377 XSTORM_ASSERT_LIST_INDEX_OFFSET);
378 if (last_idx)
379 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
380
381 /* print the asserts */
382 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
383
384 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
385 XSTORM_ASSERT_LIST_OFFSET(i));
386 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
387 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
388 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
389 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
390 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
391 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
392
393 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
394 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
395 " 0x%08x 0x%08x 0x%08x\n",
396 i, row3, row2, row1, row0);
397 rc++;
398 } else {
399 break;
400 }
401 }
402
403 /* TSTORM */
404 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
405 TSTORM_ASSERT_LIST_INDEX_OFFSET);
406 if (last_idx)
407 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
408
409 /* print the asserts */
410 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
411
412 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
413 TSTORM_ASSERT_LIST_OFFSET(i));
414 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
415 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
416 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
417 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
418 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
419 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
420
421 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
422 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
423 " 0x%08x 0x%08x 0x%08x\n",
424 i, row3, row2, row1, row0);
425 rc++;
426 } else {
427 break;
428 }
429 }
430
431 /* CSTORM */
432 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
433 CSTORM_ASSERT_LIST_INDEX_OFFSET);
434 if (last_idx)
435 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
436
437 /* print the asserts */
438 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
439
440 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
441 CSTORM_ASSERT_LIST_OFFSET(i));
442 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
443 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
444 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
445 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
446 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
447 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
448
449 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
450 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
451 " 0x%08x 0x%08x 0x%08x\n",
452 i, row3, row2, row1, row0);
453 rc++;
454 } else {
455 break;
456 }
457 }
458
459 /* USTORM */
460 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
461 USTORM_ASSERT_LIST_INDEX_OFFSET);
462 if (last_idx)
463 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
464
465 /* print the asserts */
466 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
467
468 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
469 USTORM_ASSERT_LIST_OFFSET(i));
470 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
471 USTORM_ASSERT_LIST_OFFSET(i) + 4);
472 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
473 USTORM_ASSERT_LIST_OFFSET(i) + 8);
474 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
475 USTORM_ASSERT_LIST_OFFSET(i) + 12);
476
477 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
478 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
479 " 0x%08x 0x%08x 0x%08x\n",
480 i, row3, row2, row1, row0);
481 rc++;
482 } else {
483 break;
a2fbb9ea
ET
484 }
485 }
34f80b04 486
a2fbb9ea
ET
487 return rc;
488}
c14423fe 489
a2fbb9ea
ET
490static void bnx2x_fw_dump(struct bnx2x *bp)
491{
492 u32 mark, offset;
4781bfad 493 __be32 data[9];
a2fbb9ea
ET
494 int word;
495
496 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772 497 mark = ((mark + 0x3) & ~0x3);
ad361c98 498 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
a2fbb9ea 499
ad361c98 500 printk(KERN_ERR PFX);
a2fbb9ea
ET
501 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
502 for (word = 0; word < 8; word++)
503 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
504 offset + 4*word));
505 data[8] = 0x0;
49d66772 506 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
507 }
508 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
509 for (word = 0; word < 8; word++)
510 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
511 offset + 4*word));
512 data[8] = 0x0;
49d66772 513 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea 514 }
ad361c98 515 printk(KERN_ERR PFX "end of fw dump\n");
a2fbb9ea
ET
516}
517
518static void bnx2x_panic_dump(struct bnx2x *bp)
519{
520 int i;
521 u16 j, start, end;
522
66e855f3
YG
523 bp->stats_state = STATS_STATE_DISABLED;
524 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
525
a2fbb9ea
ET
526 BNX2X_ERR("begin crash dump -----------------\n");
527
8440d2b6
EG
528 /* Indices */
529 /* Common */
530 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
531 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
532 " spq_prod_idx(%u)\n",
533 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
534 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
535
536 /* Rx */
537 for_each_rx_queue(bp, i) {
a2fbb9ea 538 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 539
c3eefaf6 540 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
66e855f3
YG
541 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
542 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
8440d2b6 543 i, fp->rx_bd_prod, fp->rx_bd_cons,
66e855f3
YG
544 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
545 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
c3eefaf6 546 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
8440d2b6
EG
547 " fp_u_idx(%x) *sb_u_idx(%x)\n",
548 fp->rx_sge_prod, fp->last_max_sge,
549 le16_to_cpu(fp->fp_u_idx),
550 fp->status_blk->u_status_block.status_block_index);
551 }
a2fbb9ea 552
8440d2b6
EG
553 /* Tx */
554 for_each_tx_queue(bp, i) {
555 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 556
c3eefaf6 557 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
8440d2b6
EG
558 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
559 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
560 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
c3eefaf6 561 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
ca00392c 562 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
8440d2b6 563 fp->status_blk->c_status_block.status_block_index,
ca00392c 564 fp->tx_db.data.prod);
8440d2b6 565 }
a2fbb9ea 566
8440d2b6
EG
567 /* Rings */
568 /* Rx */
569 for_each_rx_queue(bp, i) {
570 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
571
572 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
573 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 574 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
575 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
576 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
577
c3eefaf6
EG
578 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
579 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
580 }
581
3196a88a
EG
582 start = RX_SGE(fp->rx_sge_prod);
583 end = RX_SGE(fp->last_max_sge);
8440d2b6 584 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
585 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
586 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
587
c3eefaf6
EG
588 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
589 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
590 }
591
a2fbb9ea
ET
592 start = RCQ_BD(fp->rx_comp_cons - 10);
593 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 594 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
595 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
596
c3eefaf6
EG
597 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
598 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
599 }
600 }
601
8440d2b6
EG
602 /* Tx */
603 for_each_tx_queue(bp, i) {
604 struct bnx2x_fastpath *fp = &bp->fp[i];
605
606 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
607 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
608 for (j = start; j != end; j = TX_BD(j + 1)) {
609 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
610
c3eefaf6
EG
611 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
612 i, j, sw_bd->skb, sw_bd->first_bd);
8440d2b6
EG
613 }
614
615 start = TX_BD(fp->tx_bd_cons - 10);
616 end = TX_BD(fp->tx_bd_cons + 254);
617 for (j = start; j != end; j = TX_BD(j + 1)) {
618 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
619
c3eefaf6
EG
620 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
621 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
8440d2b6
EG
622 }
623 }
a2fbb9ea 624
34f80b04 625 bnx2x_fw_dump(bp);
a2fbb9ea
ET
626 bnx2x_mc_assert(bp);
627 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
628}
629
615f8fd9 630static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 631{
34f80b04 632 int port = BP_PORT(bp);
a2fbb9ea
ET
633 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
634 u32 val = REG_RD(bp, addr);
635 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 636 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
637
638 if (msix) {
8badd27a
EG
639 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
640 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
641 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
642 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
643 } else if (msi) {
644 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
645 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
646 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
647 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
648 } else {
649 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 650 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
651 HC_CONFIG_0_REG_INT_LINE_EN_0 |
652 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 653
8badd27a
EG
654 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
655 val, port, addr);
615f8fd9
ET
656
657 REG_WR(bp, addr, val);
658
a2fbb9ea
ET
659 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
660 }
661
8badd27a
EG
662 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
663 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
664
665 REG_WR(bp, addr, val);
37dbbf32
EG
666 /*
667 * Ensure that HC_CONFIG is written before leading/trailing edge config
668 */
669 mmiowb();
670 barrier();
34f80b04
EG
671
672 if (CHIP_IS_E1H(bp)) {
673 /* init leading/trailing edge */
674 if (IS_E1HMF(bp)) {
8badd27a 675 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 676 if (bp->port.pmf)
4acac6a5
EG
677 /* enable nig and gpio3 attention */
678 val |= 0x1100;
34f80b04
EG
679 } else
680 val = 0xffff;
681
682 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
683 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
684 }
37dbbf32
EG
685
686 /* Make sure that interrupts are indeed enabled from here on */
687 mmiowb();
a2fbb9ea
ET
688}
689
615f8fd9 690static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 691{
34f80b04 692 int port = BP_PORT(bp);
a2fbb9ea
ET
693 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
694 u32 val = REG_RD(bp, addr);
695
696 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
697 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
698 HC_CONFIG_0_REG_INT_LINE_EN_0 |
699 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
700
701 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
702 val, port, addr);
703
8badd27a
EG
704 /* flush all outstanding writes */
705 mmiowb();
706
a2fbb9ea
ET
707 REG_WR(bp, addr, val);
708 if (REG_RD(bp, addr) != val)
709 BNX2X_ERR("BUG! proper val not read from IGU!\n");
356e2385 710
a2fbb9ea
ET
711}
712
f8ef6e44 713static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 714{
a2fbb9ea 715 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 716 int i, offset;
a2fbb9ea 717
34f80b04 718 /* disable interrupt handling */
a2fbb9ea 719 atomic_inc(&bp->intr_sem);
e1510706
EG
720 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
721
f8ef6e44
YG
722 if (disable_hw)
723 /* prevent the HW from sending interrupts */
724 bnx2x_int_disable(bp);
a2fbb9ea
ET
725
726 /* make sure all ISRs are done */
727 if (msix) {
8badd27a
EG
728 synchronize_irq(bp->msix_table[0].vector);
729 offset = 1;
a2fbb9ea 730 for_each_queue(bp, i)
8badd27a 731 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
732 } else
733 synchronize_irq(bp->pdev->irq);
734
735 /* make sure sp_task is not running */
1cf167f2
EG
736 cancel_delayed_work(&bp->sp_task);
737 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
738}
739
34f80b04 740/* fast path */
a2fbb9ea
ET
741
742/*
34f80b04 743 * General service functions
a2fbb9ea
ET
744 */
745
34f80b04 746static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
747 u8 storm, u16 index, u8 op, u8 update)
748{
5c862848
EG
749 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
750 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
751 struct igu_ack_register igu_ack;
752
753 igu_ack.status_block_index = index;
754 igu_ack.sb_id_and_flags =
34f80b04 755 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
756 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
757 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
758 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
759
5c862848
EG
760 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
761 (*(u32 *)&igu_ack), hc_addr);
762 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
37dbbf32
EG
763
764 /* Make sure that ACK is written */
765 mmiowb();
766 barrier();
a2fbb9ea
ET
767}
768
769static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
770{
771 struct host_status_block *fpsb = fp->status_blk;
772 u16 rc = 0;
773
774 barrier(); /* status block is written to by the chip */
775 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
776 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
777 rc |= 1;
778 }
779 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
780 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
781 rc |= 2;
782 }
783 return rc;
784}
785
a2fbb9ea
ET
786static u16 bnx2x_ack_int(struct bnx2x *bp)
787{
5c862848
EG
788 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
789 COMMAND_REG_SIMD_MASK);
790 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 791
5c862848
EG
792 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
793 result, hc_addr);
a2fbb9ea 794
a2fbb9ea
ET
795 return result;
796}
797
798
799/*
800 * fast path service functions
801 */
802
e8b5fc51
VZ
803static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
804{
805 /* Tell compiler that consumer and producer can change */
806 barrier();
807 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
237907c1
EG
808}
809
a2fbb9ea
ET
810/* free skb in the packet ring at pos idx
811 * return idx of last bd freed
812 */
813static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
814 u16 idx)
815{
816 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
ca00392c
EG
817 struct eth_tx_start_bd *tx_start_bd;
818 struct eth_tx_bd *tx_data_bd;
a2fbb9ea 819 struct sk_buff *skb = tx_buf->skb;
34f80b04 820 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
821 int nbd;
822
823 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
824 idx, tx_buf, skb);
825
826 /* unmap first bd */
827 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
ca00392c
EG
828 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
829 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
830 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
a2fbb9ea 831
ca00392c 832 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
a2fbb9ea 833#ifdef BNX2X_STOP_ON_ERROR
ca00392c 834 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
34f80b04 835 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
836 bnx2x_panic();
837 }
838#endif
ca00392c 839 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea 840
ca00392c
EG
841 /* Get the next bd */
842 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea 843
ca00392c
EG
844 /* Skip a parse bd... */
845 --nbd;
846 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
847
848 /* ...and the TSO split header bd since they have no mapping */
849 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
850 --nbd;
851 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
a2fbb9ea
ET
852 }
853
854 /* now free frags */
855 while (nbd > 0) {
856
857 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
ca00392c
EG
858 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
859 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
860 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
a2fbb9ea
ET
861 if (--nbd)
862 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
863 }
864
865 /* release skb */
53e5e96e 866 WARN_ON(!skb);
ca00392c 867 dev_kfree_skb_any(skb);
a2fbb9ea
ET
868 tx_buf->first_bd = 0;
869 tx_buf->skb = NULL;
870
34f80b04 871 return new_cons;
a2fbb9ea
ET
872}
873
34f80b04 874static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 875{
34f80b04
EG
876 s16 used;
877 u16 prod;
878 u16 cons;
a2fbb9ea 879
34f80b04 880 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
881 prod = fp->tx_bd_prod;
882 cons = fp->tx_bd_cons;
883
34f80b04
EG
884 /* NUM_TX_RINGS = number of "next-page" entries
885 It will be used as a threshold */
886 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 887
34f80b04 888#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
889 WARN_ON(used < 0);
890 WARN_ON(used > fp->bp->tx_ring_size);
891 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 892#endif
a2fbb9ea 893
34f80b04 894 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
895}
896
7961f791 897static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
898{
899 struct bnx2x *bp = fp->bp;
555f6c78 900 struct netdev_queue *txq;
a2fbb9ea
ET
901 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
902 int done = 0;
903
904#ifdef BNX2X_STOP_ON_ERROR
905 if (unlikely(bp->panic))
906 return;
907#endif
908
ca00392c 909 txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
a2fbb9ea
ET
910 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
911 sw_cons = fp->tx_pkt_cons;
912
913 while (sw_cons != hw_cons) {
914 u16 pkt_cons;
915
916 pkt_cons = TX_BD(sw_cons);
917
918 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
919
34f80b04 920 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
921 hw_cons, sw_cons, pkt_cons);
922
34f80b04 923/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
924 rmb();
925 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
926 }
927*/
928 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
929 sw_cons++;
930 done++;
a2fbb9ea
ET
931 }
932
933 fp->tx_pkt_cons = sw_cons;
934 fp->tx_bd_cons = bd_cons;
935
a2fbb9ea 936 /* TBD need a thresh? */
555f6c78 937 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 938
6044735d
EG
939 /* Need to make the tx_bd_cons update visible to start_xmit()
940 * before checking for netif_tx_queue_stopped(). Without the
941 * memory barrier, there is a small possibility that
942 * start_xmit() will miss it and cause the queue to be stopped
943 * forever.
944 */
945 smp_mb();
946
555f6c78 947 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 948 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 949 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 950 netif_tx_wake_queue(txq);
a2fbb9ea
ET
951 }
952}
953
3196a88a 954
a2fbb9ea
ET
955static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
956 union eth_rx_cqe *rr_cqe)
957{
958 struct bnx2x *bp = fp->bp;
959 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
960 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
961
34f80b04 962 DP(BNX2X_MSG_SP,
a2fbb9ea 963 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 964 fp->index, cid, command, bp->state,
34f80b04 965 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
966
967 bp->spq_left++;
968
0626b899 969 if (fp->index) {
a2fbb9ea
ET
970 switch (command | fp->state) {
971 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
972 BNX2X_FP_STATE_OPENING):
973 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
974 cid);
975 fp->state = BNX2X_FP_STATE_OPEN;
976 break;
977
978 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
979 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
980 cid);
981 fp->state = BNX2X_FP_STATE_HALTED;
982 break;
983
984 default:
34f80b04
EG
985 BNX2X_ERR("unexpected MC reply (%d) "
986 "fp->state is %x\n", command, fp->state);
987 break;
a2fbb9ea 988 }
34f80b04 989 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
990 return;
991 }
c14423fe 992
a2fbb9ea
ET
993 switch (command | bp->state) {
994 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
995 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
996 bp->state = BNX2X_STATE_OPEN;
997 break;
998
999 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1000 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1001 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1002 fp->state = BNX2X_FP_STATE_HALTED;
1003 break;
1004
a2fbb9ea 1005 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 1006 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 1007 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
1008 break;
1009
3196a88a 1010
a2fbb9ea 1011 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 1012 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 1013 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 1014 bp->set_mac_pending = 0;
a2fbb9ea
ET
1015 break;
1016
49d66772 1017 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
ca00392c 1018 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
34f80b04 1019 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
1020 break;
1021
a2fbb9ea 1022 default:
34f80b04 1023 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 1024 command, bp->state);
34f80b04 1025 break;
a2fbb9ea 1026 }
34f80b04 1027 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
1028}
1029
7a9b2557
VZ
1030static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1031 struct bnx2x_fastpath *fp, u16 index)
1032{
1033 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1034 struct page *page = sw_buf->page;
1035 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1036
1037 /* Skip "next page" elements */
1038 if (!page)
1039 return;
1040
1041 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1042 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1043 __free_pages(page, PAGES_PER_SGE_SHIFT);
1044
1045 sw_buf->page = NULL;
1046 sge->addr_hi = 0;
1047 sge->addr_lo = 0;
1048}
1049
1050static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1051 struct bnx2x_fastpath *fp, int last)
1052{
1053 int i;
1054
1055 for (i = 0; i < last; i++)
1056 bnx2x_free_rx_sge(bp, fp, i);
1057}
1058
1059static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1060 struct bnx2x_fastpath *fp, u16 index)
1061{
1062 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1063 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1064 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1065 dma_addr_t mapping;
1066
1067 if (unlikely(page == NULL))
1068 return -ENOMEM;
1069
4f40f2cb 1070 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1071 PCI_DMA_FROMDEVICE);
8d8bb39b 1072 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1073 __free_pages(page, PAGES_PER_SGE_SHIFT);
1074 return -ENOMEM;
1075 }
1076
1077 sw_buf->page = page;
1078 pci_unmap_addr_set(sw_buf, mapping, mapping);
1079
1080 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1081 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1082
1083 return 0;
1084}
1085
a2fbb9ea
ET
1086static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1087 struct bnx2x_fastpath *fp, u16 index)
1088{
1089 struct sk_buff *skb;
1090 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1091 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1092 dma_addr_t mapping;
1093
1094 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1095 if (unlikely(skb == NULL))
1096 return -ENOMEM;
1097
437cf2f1 1098 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1099 PCI_DMA_FROMDEVICE);
8d8bb39b 1100 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1101 dev_kfree_skb(skb);
1102 return -ENOMEM;
1103 }
1104
1105 rx_buf->skb = skb;
1106 pci_unmap_addr_set(rx_buf, mapping, mapping);
1107
1108 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1109 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1110
1111 return 0;
1112}
1113
1114/* note that we are not allocating a new skb,
1115 * we are just moving one from cons to prod
1116 * we are not creating a new mapping,
1117 * so there is no need to check for dma_mapping_error().
1118 */
1119static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1120 struct sk_buff *skb, u16 cons, u16 prod)
1121{
1122 struct bnx2x *bp = fp->bp;
1123 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1124 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1125 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1126 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1127
1128 pci_dma_sync_single_for_device(bp->pdev,
1129 pci_unmap_addr(cons_rx_buf, mapping),
87942b46 1130 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
1131
1132 prod_rx_buf->skb = cons_rx_buf->skb;
1133 pci_unmap_addr_set(prod_rx_buf, mapping,
1134 pci_unmap_addr(cons_rx_buf, mapping));
1135 *prod_bd = *cons_bd;
1136}
1137
7a9b2557
VZ
1138static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1139 u16 idx)
1140{
1141 u16 last_max = fp->last_max_sge;
1142
1143 if (SUB_S16(idx, last_max) > 0)
1144 fp->last_max_sge = idx;
1145}
1146
1147static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1148{
1149 int i, j;
1150
1151 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1152 int idx = RX_SGE_CNT * i - 1;
1153
1154 for (j = 0; j < 2; j++) {
1155 SGE_MASK_CLEAR_BIT(fp, idx);
1156 idx--;
1157 }
1158 }
1159}
1160
1161static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1162 struct eth_fast_path_rx_cqe *fp_cqe)
1163{
1164 struct bnx2x *bp = fp->bp;
4f40f2cb 1165 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1166 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1167 SGE_PAGE_SHIFT;
7a9b2557
VZ
1168 u16 last_max, last_elem, first_elem;
1169 u16 delta = 0;
1170 u16 i;
1171
1172 if (!sge_len)
1173 return;
1174
1175 /* First mark all used pages */
1176 for (i = 0; i < sge_len; i++)
1177 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1178
1179 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1180 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1181
1182 /* Here we assume that the last SGE index is the biggest */
1183 prefetch((void *)(fp->sge_mask));
1184 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1185
1186 last_max = RX_SGE(fp->last_max_sge);
1187 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1188 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1189
1190 /* If ring is not full */
1191 if (last_elem + 1 != first_elem)
1192 last_elem++;
1193
1194 /* Now update the prod */
1195 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1196 if (likely(fp->sge_mask[i]))
1197 break;
1198
1199 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1200 delta += RX_SGE_MASK_ELEM_SZ;
1201 }
1202
1203 if (delta > 0) {
1204 fp->rx_sge_prod += delta;
1205 /* clear page-end entries */
1206 bnx2x_clear_sge_mask_next_elems(fp);
1207 }
1208
1209 DP(NETIF_MSG_RX_STATUS,
1210 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1211 fp->last_max_sge, fp->rx_sge_prod);
1212}
1213
1214static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1215{
1216 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1217 memset(fp->sge_mask, 0xff,
1218 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1219
33471629
EG
1220 /* Clear the two last indices in the page to 1:
1221 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1222 hence will never be indicated and should be removed from
1223 the calculations. */
1224 bnx2x_clear_sge_mask_next_elems(fp);
1225}
1226
1227static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1228 struct sk_buff *skb, u16 cons, u16 prod)
1229{
1230 struct bnx2x *bp = fp->bp;
1231 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1232 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1233 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1234 dma_addr_t mapping;
1235
1236 /* move empty skb from pool to prod and map it */
1237 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1238 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1239 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1240 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1241
1242 /* move partial skb from cons to pool (don't unmap yet) */
1243 fp->tpa_pool[queue] = *cons_rx_buf;
1244
1245 /* mark bin state as start - print error if current state != stop */
1246 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1247 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1248
1249 fp->tpa_state[queue] = BNX2X_TPA_START;
1250
1251 /* point prod_bd to new skb */
1252 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1253 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1254
1255#ifdef BNX2X_STOP_ON_ERROR
1256 fp->tpa_queue_used |= (1 << queue);
1257#ifdef __powerpc64__
1258 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1259#else
1260 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1261#endif
1262 fp->tpa_queue_used);
1263#endif
1264}
1265
1266static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1267 struct sk_buff *skb,
1268 struct eth_fast_path_rx_cqe *fp_cqe,
1269 u16 cqe_idx)
1270{
1271 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1272 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1273 u32 i, frag_len, frag_size, pages;
1274 int err;
1275 int j;
1276
1277 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1278 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1279
1280 /* This is needed in order to enable forwarding support */
1281 if (frag_size)
4f40f2cb 1282 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1283 max(frag_size, (u32)len_on_bd));
1284
1285#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1286 if (pages >
1287 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1288 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1289 pages, cqe_idx);
1290 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1291 fp_cqe->pkt_len, len_on_bd);
1292 bnx2x_panic();
1293 return -EINVAL;
1294 }
1295#endif
1296
1297 /* Run through the SGL and compose the fragmented skb */
1298 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1299 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1300
1301 /* FW gives the indices of the SGE as if the ring is an array
1302 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1303 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1304 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1305 old_rx_pg = *rx_pg;
1306
1307 /* If we fail to allocate a substitute page, we simply stop
1308 where we are and drop the whole packet */
1309 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1310 if (unlikely(err)) {
de832a55 1311 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1312 return err;
1313 }
1314
1315 /* Unmap the page as we r going to pass it to the stack */
1316 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1317 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1318
1319 /* Add one frag and update the appropriate fields in the skb */
1320 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1321
1322 skb->data_len += frag_len;
1323 skb->truesize += frag_len;
1324 skb->len += frag_len;
1325
1326 frag_size -= frag_len;
1327 }
1328
1329 return 0;
1330}
1331
1332static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1333 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1334 u16 cqe_idx)
1335{
1336 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1337 struct sk_buff *skb = rx_buf->skb;
1338 /* alloc new skb */
1339 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1340
1341 /* Unmap skb in the pool anyway, as we are going to change
1342 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1343 fails. */
1344 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1345 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1346
7a9b2557 1347 if (likely(new_skb)) {
66e855f3
YG
1348 /* fix ip xsum and give it to the stack */
1349 /* (no need to map the new skb) */
0c6671b0
EG
1350#ifdef BCM_VLAN
1351 int is_vlan_cqe =
1352 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1353 PARSING_FLAGS_VLAN);
1354 int is_not_hwaccel_vlan_cqe =
1355 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1356#endif
7a9b2557
VZ
1357
1358 prefetch(skb);
1359 prefetch(((char *)(skb)) + 128);
1360
7a9b2557
VZ
1361#ifdef BNX2X_STOP_ON_ERROR
1362 if (pad + len > bp->rx_buf_size) {
1363 BNX2X_ERR("skb_put is about to fail... "
1364 "pad %d len %d rx_buf_size %d\n",
1365 pad, len, bp->rx_buf_size);
1366 bnx2x_panic();
1367 return;
1368 }
1369#endif
1370
1371 skb_reserve(skb, pad);
1372 skb_put(skb, len);
1373
1374 skb->protocol = eth_type_trans(skb, bp->dev);
1375 skb->ip_summed = CHECKSUM_UNNECESSARY;
1376
1377 {
1378 struct iphdr *iph;
1379
1380 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1381#ifdef BCM_VLAN
1382 /* If there is no Rx VLAN offloading -
1383 take VLAN tag into an account */
1384 if (unlikely(is_not_hwaccel_vlan_cqe))
1385 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1386#endif
7a9b2557
VZ
1387 iph->check = 0;
1388 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1389 }
1390
1391 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1392 &cqe->fast_path_cqe, cqe_idx)) {
1393#ifdef BCM_VLAN
0c6671b0
EG
1394 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1395 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1396 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1397 le16_to_cpu(cqe->fast_path_cqe.
1398 vlan_tag));
1399 else
1400#endif
1401 netif_receive_skb(skb);
1402 } else {
1403 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1404 " - dropping packet!\n");
1405 dev_kfree_skb(skb);
1406 }
1407
7a9b2557
VZ
1408
1409 /* put new skb in bin */
1410 fp->tpa_pool[queue].skb = new_skb;
1411
1412 } else {
66e855f3 1413 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1414 DP(NETIF_MSG_RX_STATUS,
1415 "Failed to allocate new skb - dropping packet!\n");
de832a55 1416 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1417 }
1418
1419 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1420}
1421
1422static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1423 struct bnx2x_fastpath *fp,
1424 u16 bd_prod, u16 rx_comp_prod,
1425 u16 rx_sge_prod)
1426{
8d9c5f34 1427 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1428 int i;
1429
1430 /* Update producers */
1431 rx_prods.bd_prod = bd_prod;
1432 rx_prods.cqe_prod = rx_comp_prod;
1433 rx_prods.sge_prod = rx_sge_prod;
1434
58f4c4cf
EG
1435 /*
1436 * Make sure that the BD and SGE data is updated before updating the
1437 * producers since FW might read the BD/SGE right after the producer
1438 * is updated.
1439 * This is only applicable for weak-ordered memory model archs such
1440 * as IA-64. The following barrier is also mandatory since FW will
1441 * assumes BDs must have buffers.
1442 */
1443 wmb();
1444
8d9c5f34
EG
1445 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1446 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 1447 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
7a9b2557
VZ
1448 ((u32 *)&rx_prods)[i]);
1449
58f4c4cf
EG
1450 mmiowb(); /* keep prod updates ordered */
1451
7a9b2557 1452 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1453 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1454 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1455}
1456
a2fbb9ea
ET
1457static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1458{
1459 struct bnx2x *bp = fp->bp;
34f80b04 1460 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1461 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1462 int rx_pkt = 0;
1463
1464#ifdef BNX2X_STOP_ON_ERROR
1465 if (unlikely(bp->panic))
1466 return 0;
1467#endif
1468
34f80b04
EG
1469 /* CQ "next element" is of the size of the regular element,
1470 that's why it's ok here */
a2fbb9ea
ET
1471 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1472 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1473 hw_comp_cons++;
1474
1475 bd_cons = fp->rx_bd_cons;
1476 bd_prod = fp->rx_bd_prod;
34f80b04 1477 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1478 sw_comp_cons = fp->rx_comp_cons;
1479 sw_comp_prod = fp->rx_comp_prod;
1480
1481 /* Memory barrier necessary as speculative reads of the rx
1482 * buffer can be ahead of the index in the status block
1483 */
1484 rmb();
1485
1486 DP(NETIF_MSG_RX_STATUS,
1487 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1488 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1489
1490 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1491 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1492 struct sk_buff *skb;
1493 union eth_rx_cqe *cqe;
34f80b04
EG
1494 u8 cqe_fp_flags;
1495 u16 len, pad;
a2fbb9ea
ET
1496
1497 comp_ring_cons = RCQ_BD(sw_comp_cons);
1498 bd_prod = RX_BD(bd_prod);
1499 bd_cons = RX_BD(bd_cons);
1500
1501 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1502 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1503
a2fbb9ea 1504 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1505 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1506 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1507 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1508 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1509 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1510
1511 /* is this a slowpath msg? */
34f80b04 1512 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1513 bnx2x_sp_event(fp, cqe);
1514 goto next_cqe;
1515
1516 /* this is an rx packet */
1517 } else {
1518 rx_buf = &fp->rx_buf_ring[bd_cons];
1519 skb = rx_buf->skb;
a2fbb9ea
ET
1520 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1521 pad = cqe->fast_path_cqe.placement_offset;
1522
7a9b2557
VZ
1523 /* If CQE is marked both TPA_START and TPA_END
1524 it is a non-TPA CQE */
1525 if ((!fp->disable_tpa) &&
1526 (TPA_TYPE(cqe_fp_flags) !=
1527 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1528 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1529
1530 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1531 DP(NETIF_MSG_RX_STATUS,
1532 "calling tpa_start on queue %d\n",
1533 queue);
1534
1535 bnx2x_tpa_start(fp, queue, skb,
1536 bd_cons, bd_prod);
1537 goto next_rx;
1538 }
1539
1540 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1541 DP(NETIF_MSG_RX_STATUS,
1542 "calling tpa_stop on queue %d\n",
1543 queue);
1544
1545 if (!BNX2X_RX_SUM_FIX(cqe))
1546 BNX2X_ERR("STOP on none TCP "
1547 "data\n");
1548
1549 /* This is a size of the linear data
1550 on this skb */
1551 len = le16_to_cpu(cqe->fast_path_cqe.
1552 len_on_bd);
1553 bnx2x_tpa_stop(bp, fp, queue, pad,
1554 len, cqe, comp_ring_cons);
1555#ifdef BNX2X_STOP_ON_ERROR
1556 if (bp->panic)
17cb4006 1557 return 0;
7a9b2557
VZ
1558#endif
1559
1560 bnx2x_update_sge_prod(fp,
1561 &cqe->fast_path_cqe);
1562 goto next_cqe;
1563 }
1564 }
1565
a2fbb9ea
ET
1566 pci_dma_sync_single_for_device(bp->pdev,
1567 pci_unmap_addr(rx_buf, mapping),
1568 pad + RX_COPY_THRESH,
1569 PCI_DMA_FROMDEVICE);
1570 prefetch(skb);
1571 prefetch(((char *)(skb)) + 128);
1572
1573 /* is this an error packet? */
34f80b04 1574 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1575 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1576 "ERROR flags %x rx packet %u\n",
1577 cqe_fp_flags, sw_comp_cons);
de832a55 1578 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1579 goto reuse_rx;
1580 }
1581
1582 /* Since we don't have a jumbo ring
1583 * copy small packets if mtu > 1500
1584 */
1585 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1586 (len <= RX_COPY_THRESH)) {
1587 struct sk_buff *new_skb;
1588
1589 new_skb = netdev_alloc_skb(bp->dev,
1590 len + pad);
1591 if (new_skb == NULL) {
1592 DP(NETIF_MSG_RX_ERR,
34f80b04 1593 "ERROR packet dropped "
a2fbb9ea 1594 "because of alloc failure\n");
de832a55 1595 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1596 goto reuse_rx;
1597 }
1598
1599 /* aligned copy */
1600 skb_copy_from_linear_data_offset(skb, pad,
1601 new_skb->data + pad, len);
1602 skb_reserve(new_skb, pad);
1603 skb_put(new_skb, len);
1604
1605 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1606
1607 skb = new_skb;
1608
1609 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1610 pci_unmap_single(bp->pdev,
1611 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1612 bp->rx_buf_size,
a2fbb9ea
ET
1613 PCI_DMA_FROMDEVICE);
1614 skb_reserve(skb, pad);
1615 skb_put(skb, len);
1616
1617 } else {
1618 DP(NETIF_MSG_RX_ERR,
34f80b04 1619 "ERROR packet dropped because "
a2fbb9ea 1620 "of alloc failure\n");
de832a55 1621 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1622reuse_rx:
1623 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1624 goto next_rx;
1625 }
1626
1627 skb->protocol = eth_type_trans(skb, bp->dev);
1628
1629 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1630 if (bp->rx_csum) {
1adcd8be
EG
1631 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1632 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1633 else
de832a55 1634 fp->eth_q_stats.hw_csum_err++;
66e855f3 1635 }
a2fbb9ea
ET
1636 }
1637
748e5439 1638 skb_record_rx_queue(skb, fp->index);
a2fbb9ea 1639#ifdef BCM_VLAN
0c6671b0 1640 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1641 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1642 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1643 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1644 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1645 else
1646#endif
34f80b04 1647 netif_receive_skb(skb);
a2fbb9ea 1648
a2fbb9ea
ET
1649
1650next_rx:
1651 rx_buf->skb = NULL;
1652
1653 bd_cons = NEXT_RX_IDX(bd_cons);
1654 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1655 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1656 rx_pkt++;
a2fbb9ea
ET
1657next_cqe:
1658 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1659 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1660
34f80b04 1661 if (rx_pkt == budget)
a2fbb9ea
ET
1662 break;
1663 } /* while */
1664
1665 fp->rx_bd_cons = bd_cons;
34f80b04 1666 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1667 fp->rx_comp_cons = sw_comp_cons;
1668 fp->rx_comp_prod = sw_comp_prod;
1669
7a9b2557
VZ
1670 /* Update producers */
1671 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1672 fp->rx_sge_prod);
a2fbb9ea
ET
1673
1674 fp->rx_pkt += rx_pkt;
1675 fp->rx_calls++;
1676
1677 return rx_pkt;
1678}
1679
1680static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1681{
1682 struct bnx2x_fastpath *fp = fp_cookie;
1683 struct bnx2x *bp = fp->bp;
a2fbb9ea 1684
da5a662a
VZ
1685 /* Return here if interrupt is disabled */
1686 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1687 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1688 return IRQ_HANDLED;
1689 }
1690
34f80b04 1691 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1692 fp->index, fp->sb_id);
0626b899 1693 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1694
1695#ifdef BNX2X_STOP_ON_ERROR
1696 if (unlikely(bp->panic))
1697 return IRQ_HANDLED;
1698#endif
ca00392c
EG
1699 /* Handle Rx or Tx according to MSI-X vector */
1700 if (fp->is_rx_queue) {
1701 prefetch(fp->rx_cons_sb);
1702 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1703
ca00392c 1704 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1705
ca00392c
EG
1706 } else {
1707 prefetch(fp->tx_cons_sb);
1708 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709
1710 bnx2x_update_fpsb_idx(fp);
1711 rmb();
1712 bnx2x_tx_int(fp);
1713
1714 /* Re-enable interrupts */
1715 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1716 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1717 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1718 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1719 }
34f80b04 1720
a2fbb9ea
ET
1721 return IRQ_HANDLED;
1722}
1723
1724static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1725{
555f6c78 1726 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1727 u16 status = bnx2x_ack_int(bp);
34f80b04 1728 u16 mask;
ca00392c 1729 int i;
a2fbb9ea 1730
34f80b04 1731 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1732 if (unlikely(status == 0)) {
1733 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1734 return IRQ_NONE;
1735 }
f5372251 1736 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1737
34f80b04 1738 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1739 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1740 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1741 return IRQ_HANDLED;
1742 }
1743
3196a88a
EG
1744#ifdef BNX2X_STOP_ON_ERROR
1745 if (unlikely(bp->panic))
1746 return IRQ_HANDLED;
1747#endif
1748
ca00392c
EG
1749 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1750 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1751
ca00392c
EG
1752 mask = 0x2 << fp->sb_id;
1753 if (status & mask) {
1754 /* Handle Rx or Tx according to SB id */
1755 if (fp->is_rx_queue) {
1756 prefetch(fp->rx_cons_sb);
1757 prefetch(&fp->status_blk->u_status_block.
1758 status_block_index);
a2fbb9ea 1759
ca00392c 1760 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1761
ca00392c
EG
1762 } else {
1763 prefetch(fp->tx_cons_sb);
1764 prefetch(&fp->status_blk->c_status_block.
1765 status_block_index);
1766
1767 bnx2x_update_fpsb_idx(fp);
1768 rmb();
1769 bnx2x_tx_int(fp);
1770
1771 /* Re-enable interrupts */
1772 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1773 le16_to_cpu(fp->fp_u_idx),
1774 IGU_INT_NOP, 1);
1775 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1776 le16_to_cpu(fp->fp_c_idx),
1777 IGU_INT_ENABLE, 1);
1778 }
1779 status &= ~mask;
1780 }
a2fbb9ea
ET
1781 }
1782
a2fbb9ea 1783
34f80b04 1784 if (unlikely(status & 0x1)) {
1cf167f2 1785 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1786
1787 status &= ~0x1;
1788 if (!status)
1789 return IRQ_HANDLED;
1790 }
1791
34f80b04
EG
1792 if (status)
1793 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1794 status);
a2fbb9ea 1795
c18487ee 1796 return IRQ_HANDLED;
a2fbb9ea
ET
1797}
1798
c18487ee 1799/* end of fast path */
a2fbb9ea 1800
bb2a0f7a 1801static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1802
c18487ee
YR
1803/* Link */
1804
1805/*
1806 * General service functions
1807 */
a2fbb9ea 1808
4a37fb66 1809static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1810{
1811 u32 lock_status;
1812 u32 resource_bit = (1 << resource);
4a37fb66
YG
1813 int func = BP_FUNC(bp);
1814 u32 hw_lock_control_reg;
c18487ee 1815 int cnt;
a2fbb9ea 1816
c18487ee
YR
1817 /* Validating that the resource is within range */
1818 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819 DP(NETIF_MSG_HW,
1820 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822 return -EINVAL;
1823 }
a2fbb9ea 1824
4a37fb66
YG
1825 if (func <= 5) {
1826 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827 } else {
1828 hw_lock_control_reg =
1829 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830 }
1831
c18487ee 1832 /* Validating that the resource is not already taken */
4a37fb66 1833 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1834 if (lock_status & resource_bit) {
1835 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1836 lock_status, resource_bit);
1837 return -EEXIST;
1838 }
a2fbb9ea 1839
46230476
EG
1840 /* Try for 5 second every 5ms */
1841 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1842 /* Try to acquire the lock */
4a37fb66
YG
1843 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1845 if (lock_status & resource_bit)
1846 return 0;
a2fbb9ea 1847
c18487ee 1848 msleep(5);
a2fbb9ea 1849 }
c18487ee
YR
1850 DP(NETIF_MSG_HW, "Timeout\n");
1851 return -EAGAIN;
1852}
a2fbb9ea 1853
4a37fb66 1854static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1855{
1856 u32 lock_status;
1857 u32 resource_bit = (1 << resource);
4a37fb66
YG
1858 int func = BP_FUNC(bp);
1859 u32 hw_lock_control_reg;
a2fbb9ea 1860
c18487ee
YR
1861 /* Validating that the resource is within range */
1862 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863 DP(NETIF_MSG_HW,
1864 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866 return -EINVAL;
1867 }
1868
4a37fb66
YG
1869 if (func <= 5) {
1870 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871 } else {
1872 hw_lock_control_reg =
1873 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874 }
1875
c18487ee 1876 /* Validating that the resource is currently taken */
4a37fb66 1877 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1878 if (!(lock_status & resource_bit)) {
1879 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1880 lock_status, resource_bit);
1881 return -EFAULT;
a2fbb9ea
ET
1882 }
1883
4a37fb66 1884 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1885 return 0;
1886}
1887
1888/* HW Lock for shared dual port PHYs */
4a37fb66 1889static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1890{
34f80b04 1891 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1892
46c6a674
EG
1893 if (bp->port.need_hw_lock)
1894 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1895}
a2fbb9ea 1896
4a37fb66 1897static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1898{
46c6a674
EG
1899 if (bp->port.need_hw_lock)
1900 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1901
34f80b04 1902 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1903}
a2fbb9ea 1904
4acac6a5
EG
1905int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906{
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910 int gpio_shift = gpio_num +
1911 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912 u32 gpio_mask = (1 << gpio_shift);
1913 u32 gpio_reg;
1914 int value;
1915
1916 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918 return -EINVAL;
1919 }
1920
1921 /* read GPIO value */
1922 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924 /* get the requested pin value */
1925 if ((gpio_reg & gpio_mask) == gpio_mask)
1926 value = 1;
1927 else
1928 value = 0;
1929
1930 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1931
1932 return value;
1933}
1934
17de50b7 1935int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
a2fbb9ea 1944
c18487ee
YR
1945 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947 return -EINVAL;
1948 }
a2fbb9ea 1949
4a37fb66 1950 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1951 /* read GPIO and mask except the float bits */
1952 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1953
c18487ee
YR
1954 switch (mode) {
1955 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957 gpio_num, gpio_shift);
1958 /* clear FLOAT and set CLR */
1959 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961 break;
a2fbb9ea 1962
c18487ee
YR
1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965 gpio_num, gpio_shift);
1966 /* clear FLOAT and set SET */
1967 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969 break;
a2fbb9ea 1970
17de50b7 1971 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1972 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973 gpio_num, gpio_shift);
1974 /* set FLOAT */
1975 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976 break;
a2fbb9ea 1977
c18487ee
YR
1978 default:
1979 break;
a2fbb9ea
ET
1980 }
1981
c18487ee 1982 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1983 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1984
c18487ee 1985 return 0;
a2fbb9ea
ET
1986}
1987
4acac6a5
EG
1988int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989{
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1996 u32 gpio_reg;
1997
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000 return -EINVAL;
2001 }
2002
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO int */
2005 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007 switch (mode) {
2008 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010 "output low\n", gpio_num, gpio_shift);
2011 /* clear SET and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014 break;
2015
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018 "output high\n", gpio_num, gpio_shift);
2019 /* clear CLR and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022 break;
2023
2024 default:
2025 break;
2026 }
2027
2028 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031 return 0;
2032}
2033
c18487ee 2034static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2035{
c18487ee
YR
2036 u32 spio_mask = (1 << spio_num);
2037 u32 spio_reg;
a2fbb9ea 2038
c18487ee
YR
2039 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040 (spio_num > MISC_REGISTERS_SPIO_7)) {
2041 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042 return -EINVAL;
a2fbb9ea
ET
2043 }
2044
4a37fb66 2045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2046 /* read SPIO and mask except the float bits */
2047 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2048
c18487ee 2049 switch (mode) {
6378c025 2050 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2051 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052 /* clear FLOAT and set CLR */
2053 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055 break;
a2fbb9ea 2056
6378c025 2057 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2058 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059 /* clear FLOAT and set SET */
2060 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062 break;
a2fbb9ea 2063
c18487ee
YR
2064 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066 /* set FLOAT */
2067 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068 break;
a2fbb9ea 2069
c18487ee
YR
2070 default:
2071 break;
a2fbb9ea
ET
2072 }
2073
c18487ee 2074 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2076
a2fbb9ea
ET
2077 return 0;
2078}
2079
c18487ee 2080static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2081{
ad33ea3a
EG
2082 switch (bp->link_vars.ieee_fc &
2083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2084 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2085 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2086 ADVERTISED_Pause);
2087 break;
356e2385 2088
c18487ee 2089 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2090 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2091 ADVERTISED_Pause);
2092 break;
356e2385 2093
c18487ee 2094 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2095 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2096 break;
356e2385 2097
c18487ee 2098 default:
34f80b04 2099 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2100 ADVERTISED_Pause);
2101 break;
2102 }
2103}
f1410647 2104
c18487ee
YR
2105static void bnx2x_link_report(struct bnx2x *bp)
2106{
2691d51d
EG
2107 if (bp->state == BNX2X_STATE_DISABLED) {
2108 netif_carrier_off(bp->dev);
2109 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2110 return;
2111 }
2112
c18487ee
YR
2113 if (bp->link_vars.link_up) {
2114 if (bp->state == BNX2X_STATE_OPEN)
2115 netif_carrier_on(bp->dev);
2116 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2117
c18487ee 2118 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2119
c18487ee
YR
2120 if (bp->link_vars.duplex == DUPLEX_FULL)
2121 printk("full duplex");
2122 else
2123 printk("half duplex");
f1410647 2124
c0700f90
DM
2125 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2126 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2127 printk(", receive ");
356e2385
EG
2128 if (bp->link_vars.flow_ctrl &
2129 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2130 printk("& transmit ");
2131 } else {
2132 printk(", transmit ");
2133 }
2134 printk("flow control ON");
2135 }
2136 printk("\n");
f1410647 2137
c18487ee
YR
2138 } else { /* link_down */
2139 netif_carrier_off(bp->dev);
2140 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2141 }
c18487ee
YR
2142}
2143
b5bf9068 2144static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2145{
19680c48
EG
2146 if (!BP_NOMCP(bp)) {
2147 u8 rc;
a2fbb9ea 2148
19680c48 2149 /* Initialize link parameters structure variables */
8c99e7b0
YR
2150 /* It is recommended to turn off RX FC for jumbo frames
2151 for better performance */
0c593270 2152 if (bp->dev->mtu > 5000)
c0700f90 2153 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2154 else
c0700f90 2155 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2156
4a37fb66 2157 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2158
2159 if (load_mode == LOAD_DIAG)
2160 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2161
19680c48 2162 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2163
4a37fb66 2164 bnx2x_release_phy_lock(bp);
a2fbb9ea 2165
3c96c68b
EG
2166 bnx2x_calc_fc_adv(bp);
2167
b5bf9068
EG
2168 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2169 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2170 bnx2x_link_report(bp);
b5bf9068 2171 }
34f80b04 2172
19680c48
EG
2173 return rc;
2174 }
f5372251 2175 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2176 return -EINVAL;
a2fbb9ea
ET
2177}
2178
c18487ee 2179static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2180{
19680c48 2181 if (!BP_NOMCP(bp)) {
4a37fb66 2182 bnx2x_acquire_phy_lock(bp);
19680c48 2183 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2184 bnx2x_release_phy_lock(bp);
a2fbb9ea 2185
19680c48
EG
2186 bnx2x_calc_fc_adv(bp);
2187 } else
f5372251 2188 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2189}
a2fbb9ea 2190
c18487ee
YR
2191static void bnx2x__link_reset(struct bnx2x *bp)
2192{
19680c48 2193 if (!BP_NOMCP(bp)) {
4a37fb66 2194 bnx2x_acquire_phy_lock(bp);
589abe3a 2195 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2196 bnx2x_release_phy_lock(bp);
19680c48 2197 } else
f5372251 2198 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2199}
a2fbb9ea 2200
c18487ee
YR
2201static u8 bnx2x_link_test(struct bnx2x *bp)
2202{
2203 u8 rc;
a2fbb9ea 2204
4a37fb66 2205 bnx2x_acquire_phy_lock(bp);
c18487ee 2206 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2207 bnx2x_release_phy_lock(bp);
a2fbb9ea 2208
c18487ee
YR
2209 return rc;
2210}
a2fbb9ea 2211
8a1c38d1 2212static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2213{
8a1c38d1
EG
2214 u32 r_param = bp->link_vars.line_speed / 8;
2215 u32 fair_periodic_timeout_usec;
2216 u32 t_fair;
34f80b04 2217
8a1c38d1
EG
2218 memset(&(bp->cmng.rs_vars), 0,
2219 sizeof(struct rate_shaping_vars_per_port));
2220 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2221
8a1c38d1
EG
2222 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2223 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2224
8a1c38d1
EG
2225 /* this is the threshold below which no timer arming will occur
2226 1.25 coefficient is for the threshold to be a little bigger
2227 than the real time, to compensate for timer in-accuracy */
2228 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2229 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2230
8a1c38d1
EG
2231 /* resolution of fairness timer */
2232 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2233 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2234 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2235
8a1c38d1
EG
2236 /* this is the threshold below which we won't arm the timer anymore */
2237 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2238
8a1c38d1
EG
2239 /* we multiply by 1e3/8 to get bytes/msec.
2240 We don't want the credits to pass a credit
2241 of the t_fair*FAIR_MEM (algorithm resolution) */
2242 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2243 /* since each tick is 4 usec */
2244 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2245}
2246
2691d51d
EG
2247/* Calculates the sum of vn_min_rates.
2248 It's needed for further normalizing of the min_rates.
2249 Returns:
2250 sum of vn_min_rates.
2251 or
2252 0 - if all the min_rates are 0.
2253 In the later case fainess algorithm should be deactivated.
2254 If not all min_rates are zero then those that are zeroes will be set to 1.
2255 */
2256static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2257{
2258 int all_zero = 1;
2259 int port = BP_PORT(bp);
2260 int vn;
2261
2262 bp->vn_weight_sum = 0;
2263 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2264 int func = 2*vn + port;
2265 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2266 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2267 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2268
2269 /* Skip hidden vns */
2270 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2271 continue;
2272
2273 /* If min rate is zero - set it to 1 */
2274 if (!vn_min_rate)
2275 vn_min_rate = DEF_MIN_RATE;
2276 else
2277 all_zero = 0;
2278
2279 bp->vn_weight_sum += vn_min_rate;
2280 }
2281
2282 /* ... only if all min rates are zeros - disable fairness */
2283 if (all_zero)
2284 bp->vn_weight_sum = 0;
2285}
2286
8a1c38d1 2287static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2288{
2289 struct rate_shaping_vars_per_vn m_rs_vn;
2290 struct fairness_vars_per_vn m_fair_vn;
2291 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2292 u16 vn_min_rate, vn_max_rate;
2293 int i;
2294
2295 /* If function is hidden - set min and max to zeroes */
2296 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2297 vn_min_rate = 0;
2298 vn_max_rate = 0;
2299
2300 } else {
2301 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2302 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2303 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2304 if current min rate is zero - set it to 1.
33471629 2305 This is a requirement of the algorithm. */
8a1c38d1 2306 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2307 vn_min_rate = DEF_MIN_RATE;
2308 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2309 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2310 }
2311
8a1c38d1
EG
2312 DP(NETIF_MSG_IFUP,
2313 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2314 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2315
2316 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2317 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2318
2319 /* global vn counter - maximal Mbps for this vn */
2320 m_rs_vn.vn_counter.rate = vn_max_rate;
2321
2322 /* quota - number of bytes transmitted in this period */
2323 m_rs_vn.vn_counter.quota =
2324 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2325
8a1c38d1 2326 if (bp->vn_weight_sum) {
34f80b04
EG
2327 /* credit for each period of the fairness algorithm:
2328 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2329 vn_weight_sum should not be larger than 10000, thus
2330 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2331 than zero */
34f80b04 2332 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2333 max((u32)(vn_min_rate * (T_FAIR_COEF /
2334 (8 * bp->vn_weight_sum))),
2335 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2336 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2337 m_fair_vn.vn_credit_delta);
2338 }
2339
34f80b04
EG
2340 /* Store it to internal memory */
2341 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2342 REG_WR(bp, BAR_XSTRORM_INTMEM +
2343 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2344 ((u32 *)(&m_rs_vn))[i]);
2345
2346 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2347 REG_WR(bp, BAR_XSTRORM_INTMEM +
2348 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2349 ((u32 *)(&m_fair_vn))[i]);
2350}
2351
8a1c38d1 2352
c18487ee
YR
2353/* This function is called upon link interrupt */
2354static void bnx2x_link_attn(struct bnx2x *bp)
2355{
bb2a0f7a
YG
2356 /* Make sure that we are synced with the current statistics */
2357 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2358
c18487ee 2359 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2360
bb2a0f7a
YG
2361 if (bp->link_vars.link_up) {
2362
1c06328c
EG
2363 /* dropless flow control */
2364 if (CHIP_IS_E1H(bp)) {
2365 int port = BP_PORT(bp);
2366 u32 pause_enabled = 0;
2367
2368 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2369 pause_enabled = 1;
2370
2371 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2372 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2373 pause_enabled);
2374 }
2375
bb2a0f7a
YG
2376 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2377 struct host_port_stats *pstats;
2378
2379 pstats = bnx2x_sp(bp, port_stats);
2380 /* reset old bmac stats */
2381 memset(&(pstats->mac_stx[0]), 0,
2382 sizeof(struct mac_stx));
2383 }
2384 if ((bp->state == BNX2X_STATE_OPEN) ||
2385 (bp->state == BNX2X_STATE_DISABLED))
2386 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2387 }
2388
c18487ee
YR
2389 /* indicate link status */
2390 bnx2x_link_report(bp);
34f80b04
EG
2391
2392 if (IS_E1HMF(bp)) {
8a1c38d1 2393 int port = BP_PORT(bp);
34f80b04 2394 int func;
8a1c38d1 2395 int vn;
34f80b04
EG
2396
2397 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2398 if (vn == BP_E1HVN(bp))
2399 continue;
2400
8a1c38d1 2401 func = ((vn << 1) | port);
34f80b04
EG
2402
2403 /* Set the attention towards other drivers
2404 on the same port */
2405 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2406 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2407 }
34f80b04 2408
8a1c38d1
EG
2409 if (bp->link_vars.link_up) {
2410 int i;
2411
2412 /* Init rate shaping and fairness contexts */
2413 bnx2x_init_port_minmax(bp);
34f80b04 2414
34f80b04 2415 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2416 bnx2x_init_vn_minmax(bp, 2*vn + port);
2417
2418 /* Store it to internal memory */
2419 for (i = 0;
2420 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2421 REG_WR(bp, BAR_XSTRORM_INTMEM +
2422 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2423 ((u32 *)(&bp->cmng))[i]);
2424 }
34f80b04 2425 }
c18487ee 2426}
a2fbb9ea 2427
c18487ee
YR
2428static void bnx2x__link_status_update(struct bnx2x *bp)
2429{
2691d51d
EG
2430 int func = BP_FUNC(bp);
2431
c18487ee
YR
2432 if (bp->state != BNX2X_STATE_OPEN)
2433 return;
a2fbb9ea 2434
c18487ee 2435 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2436
bb2a0f7a
YG
2437 if (bp->link_vars.link_up)
2438 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2439 else
2440 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2441
2691d51d
EG
2442 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2443 bnx2x_calc_vn_weight_sum(bp);
2444
c18487ee
YR
2445 /* indicate link status */
2446 bnx2x_link_report(bp);
a2fbb9ea 2447}
a2fbb9ea 2448
34f80b04
EG
2449static void bnx2x_pmf_update(struct bnx2x *bp)
2450{
2451 int port = BP_PORT(bp);
2452 u32 val;
2453
2454 bp->port.pmf = 1;
2455 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2456
2457 /* enable nig attention */
2458 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2459 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2460 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2461
2462 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2463}
2464
c18487ee 2465/* end of Link */
a2fbb9ea
ET
2466
2467/* slow path */
2468
2469/*
2470 * General service functions
2471 */
2472
2691d51d
EG
2473/* send the MCP a request, block until there is a reply */
2474u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2475{
2476 int func = BP_FUNC(bp);
2477 u32 seq = ++bp->fw_seq;
2478 u32 rc = 0;
2479 u32 cnt = 1;
2480 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2481
2482 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2483 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2484
2485 do {
2486 /* let the FW do it's magic ... */
2487 msleep(delay);
2488
2489 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2490
2491 /* Give the FW up to 2 second (200*10ms) */
2492 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2493
2494 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2495 cnt*delay, rc, seq);
2496
2497 /* is this a reply to our command? */
2498 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2499 rc &= FW_MSG_CODE_MASK;
2500 else {
2501 /* FW BUG! */
2502 BNX2X_ERR("FW failed to respond!\n");
2503 bnx2x_fw_dump(bp);
2504 rc = 0;
2505 }
2506
2507 return rc;
2508}
2509
2510static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2511static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2512static void bnx2x_set_rx_mode(struct net_device *dev);
2513
2514static void bnx2x_e1h_disable(struct bnx2x *bp)
2515{
2516 int port = BP_PORT(bp);
2517 int i;
2518
2519 bp->rx_mode = BNX2X_RX_MODE_NONE;
2520 bnx2x_set_storm_rx_mode(bp);
2521
2522 netif_tx_disable(bp->dev);
2523 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2524
2525 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2526
2527 bnx2x_set_mac_addr_e1h(bp, 0);
2528
2529 for (i = 0; i < MC_HASH_SIZE; i++)
2530 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2531
2532 netif_carrier_off(bp->dev);
2533}
2534
2535static void bnx2x_e1h_enable(struct bnx2x *bp)
2536{
2537 int port = BP_PORT(bp);
2538
2539 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2540
2541 bnx2x_set_mac_addr_e1h(bp, 1);
2542
2543 /* Tx queue should be only reenabled */
2544 netif_tx_wake_all_queues(bp->dev);
2545
2546 /* Initialize the receive filter. */
2547 bnx2x_set_rx_mode(bp->dev);
2548}
2549
2550static void bnx2x_update_min_max(struct bnx2x *bp)
2551{
2552 int port = BP_PORT(bp);
2553 int vn, i;
2554
2555 /* Init rate shaping and fairness contexts */
2556 bnx2x_init_port_minmax(bp);
2557
2558 bnx2x_calc_vn_weight_sum(bp);
2559
2560 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2561 bnx2x_init_vn_minmax(bp, 2*vn + port);
2562
2563 if (bp->port.pmf) {
2564 int func;
2565
2566 /* Set the attention towards other drivers on the same port */
2567 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2568 if (vn == BP_E1HVN(bp))
2569 continue;
2570
2571 func = ((vn << 1) | port);
2572 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2573 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2574 }
2575
2576 /* Store it to internal memory */
2577 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2578 REG_WR(bp, BAR_XSTRORM_INTMEM +
2579 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2580 ((u32 *)(&bp->cmng))[i]);
2581 }
2582}
2583
2584static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2585{
2586 int func = BP_FUNC(bp);
2587
2588 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2589 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2590
2591 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2592
2593 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2594 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2595 bp->state = BNX2X_STATE_DISABLED;
2596
2597 bnx2x_e1h_disable(bp);
2598 } else {
2599 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2600 bp->state = BNX2X_STATE_OPEN;
2601
2602 bnx2x_e1h_enable(bp);
2603 }
2604 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2605 }
2606 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2607
2608 bnx2x_update_min_max(bp);
2609 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2610 }
2611
2612 /* Report results to MCP */
2613 if (dcc_event)
2614 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2615 else
2616 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2617}
2618
a2fbb9ea
ET
2619/* the slow path queue is odd since completions arrive on the fastpath ring */
2620static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2621 u32 data_hi, u32 data_lo, int common)
2622{
34f80b04 2623 int func = BP_FUNC(bp);
a2fbb9ea 2624
34f80b04
EG
2625 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2626 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2627 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2628 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2629 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2630
2631#ifdef BNX2X_STOP_ON_ERROR
2632 if (unlikely(bp->panic))
2633 return -EIO;
2634#endif
2635
34f80b04 2636 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2637
2638 if (!bp->spq_left) {
2639 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2640 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2641 bnx2x_panic();
2642 return -EBUSY;
2643 }
f1410647 2644
a2fbb9ea
ET
2645 /* CID needs port number to be encoded int it */
2646 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2647 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2648 HW_CID(bp, cid)));
2649 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2650 if (common)
2651 bp->spq_prod_bd->hdr.type |=
2652 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2653
2654 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2655 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2656
2657 bp->spq_left--;
2658
2659 if (bp->spq_prod_bd == bp->spq_last_bd) {
2660 bp->spq_prod_bd = bp->spq;
2661 bp->spq_prod_idx = 0;
2662 DP(NETIF_MSG_TIMER, "end of spq\n");
2663
2664 } else {
2665 bp->spq_prod_bd++;
2666 bp->spq_prod_idx++;
2667 }
2668
37dbbf32
EG
2669 /* Make sure that BD data is updated before writing the producer */
2670 wmb();
2671
34f80b04 2672 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2673 bp->spq_prod_idx);
2674
37dbbf32
EG
2675 mmiowb();
2676
34f80b04 2677 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2678 return 0;
2679}
2680
2681/* acquire split MCP access lock register */
4a37fb66 2682static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2683{
a2fbb9ea 2684 u32 i, j, val;
34f80b04 2685 int rc = 0;
a2fbb9ea
ET
2686
2687 might_sleep();
2688 i = 100;
2689 for (j = 0; j < i*10; j++) {
2690 val = (1UL << 31);
2691 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2692 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2693 if (val & (1L << 31))
2694 break;
2695
2696 msleep(5);
2697 }
a2fbb9ea 2698 if (!(val & (1L << 31))) {
19680c48 2699 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2700 rc = -EBUSY;
2701 }
2702
2703 return rc;
2704}
2705
4a37fb66
YG
2706/* release split MCP access lock register */
2707static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2708{
2709 u32 val = 0;
2710
2711 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2712}
2713
2714static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2715{
2716 struct host_def_status_block *def_sb = bp->def_status_blk;
2717 u16 rc = 0;
2718
2719 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2720 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2721 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2722 rc |= 1;
2723 }
2724 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2725 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2726 rc |= 2;
2727 }
2728 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2729 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2730 rc |= 4;
2731 }
2732 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2733 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2734 rc |= 8;
2735 }
2736 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2737 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2738 rc |= 16;
2739 }
2740 return rc;
2741}
2742
2743/*
2744 * slow path service functions
2745 */
2746
2747static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2748{
34f80b04 2749 int port = BP_PORT(bp);
5c862848
EG
2750 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2751 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2752 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2753 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2754 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2755 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2756 u32 aeu_mask;
87942b46 2757 u32 nig_mask = 0;
a2fbb9ea 2758
a2fbb9ea
ET
2759 if (bp->attn_state & asserted)
2760 BNX2X_ERR("IGU ERROR\n");
2761
3fcaf2e5
EG
2762 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2763 aeu_mask = REG_RD(bp, aeu_addr);
2764
a2fbb9ea 2765 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2766 aeu_mask, asserted);
2767 aeu_mask &= ~(asserted & 0xff);
2768 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2769
3fcaf2e5
EG
2770 REG_WR(bp, aeu_addr, aeu_mask);
2771 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2772
3fcaf2e5 2773 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2774 bp->attn_state |= asserted;
3fcaf2e5 2775 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2776
2777 if (asserted & ATTN_HARD_WIRED_MASK) {
2778 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2779
a5e9a7cf
EG
2780 bnx2x_acquire_phy_lock(bp);
2781
877e9aa4 2782 /* save nig interrupt mask */
87942b46 2783 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2784 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2785
c18487ee 2786 bnx2x_link_attn(bp);
a2fbb9ea
ET
2787
2788 /* handle unicore attn? */
2789 }
2790 if (asserted & ATTN_SW_TIMER_4_FUNC)
2791 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2792
2793 if (asserted & GPIO_2_FUNC)
2794 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2795
2796 if (asserted & GPIO_3_FUNC)
2797 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2798
2799 if (asserted & GPIO_4_FUNC)
2800 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2801
2802 if (port == 0) {
2803 if (asserted & ATTN_GENERAL_ATTN_1) {
2804 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2805 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2806 }
2807 if (asserted & ATTN_GENERAL_ATTN_2) {
2808 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2809 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2810 }
2811 if (asserted & ATTN_GENERAL_ATTN_3) {
2812 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2813 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2814 }
2815 } else {
2816 if (asserted & ATTN_GENERAL_ATTN_4) {
2817 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2818 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2819 }
2820 if (asserted & ATTN_GENERAL_ATTN_5) {
2821 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2822 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2823 }
2824 if (asserted & ATTN_GENERAL_ATTN_6) {
2825 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2826 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2827 }
2828 }
2829
2830 } /* if hardwired */
2831
5c862848
EG
2832 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2833 asserted, hc_addr);
2834 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2835
2836 /* now set back the mask */
a5e9a7cf 2837 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2838 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2839 bnx2x_release_phy_lock(bp);
2840 }
a2fbb9ea
ET
2841}
2842
fd4ef40d
EG
2843static inline void bnx2x_fan_failure(struct bnx2x *bp)
2844{
2845 int port = BP_PORT(bp);
2846
2847 /* mark the failure */
2848 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2849 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2850 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2851 bp->link_params.ext_phy_config);
2852
2853 /* log the failure */
2854 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2855 " the driver to shutdown the card to prevent permanent"
2856 " damage. Please contact Dell Support for assistance\n",
2857 bp->dev->name);
2858}
877e9aa4 2859static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2860{
34f80b04 2861 int port = BP_PORT(bp);
877e9aa4 2862 int reg_offset;
4d295db0 2863 u32 val, swap_val, swap_override;
877e9aa4 2864
34f80b04
EG
2865 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2866 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2867
34f80b04 2868 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2869
2870 val = REG_RD(bp, reg_offset);
2871 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2872 REG_WR(bp, reg_offset, val);
2873
2874 BNX2X_ERR("SPIO5 hw attention\n");
2875
fd4ef40d 2876 /* Fan failure attention */
35b19ba5
EG
2877 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2878 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2879 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2880 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2881 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2882 /* The PHY reset is controlled by GPIO 1 */
2883 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2884 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2885 break;
2886
4d295db0
EG
2887 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2888 /* The PHY reset is controlled by GPIO 1 */
2889 /* fake the port number to cancel the swap done in
2890 set_gpio() */
2891 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2892 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2893 port = (swap_val && swap_override) ^ 1;
2894 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2895 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2896 break;
2897
877e9aa4
ET
2898 default:
2899 break;
2900 }
fd4ef40d 2901 bnx2x_fan_failure(bp);
877e9aa4 2902 }
34f80b04 2903
589abe3a
EG
2904 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2905 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2906 bnx2x_acquire_phy_lock(bp);
2907 bnx2x_handle_module_detect_int(&bp->link_params);
2908 bnx2x_release_phy_lock(bp);
2909 }
2910
34f80b04
EG
2911 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2912
2913 val = REG_RD(bp, reg_offset);
2914 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2915 REG_WR(bp, reg_offset, val);
2916
2917 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2918 (attn & HW_INTERRUT_ASSERT_SET_0));
2919 bnx2x_panic();
2920 }
877e9aa4
ET
2921}
2922
2923static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2924{
2925 u32 val;
2926
0626b899 2927 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2928
2929 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2930 BNX2X_ERR("DB hw attention 0x%x\n", val);
2931 /* DORQ discard attention */
2932 if (val & 0x2)
2933 BNX2X_ERR("FATAL error from DORQ\n");
2934 }
34f80b04
EG
2935
2936 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2937
2938 int port = BP_PORT(bp);
2939 int reg_offset;
2940
2941 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2942 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2943
2944 val = REG_RD(bp, reg_offset);
2945 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2946 REG_WR(bp, reg_offset, val);
2947
2948 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2949 (attn & HW_INTERRUT_ASSERT_SET_1));
2950 bnx2x_panic();
2951 }
877e9aa4
ET
2952}
2953
2954static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2955{
2956 u32 val;
2957
2958 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2959
2960 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2961 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2962 /* CFC error attention */
2963 if (val & 0x2)
2964 BNX2X_ERR("FATAL error from CFC\n");
2965 }
2966
2967 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2968
2969 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2970 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2971 /* RQ_USDMDP_FIFO_OVERFLOW */
2972 if (val & 0x18000)
2973 BNX2X_ERR("FATAL error from PXP\n");
2974 }
34f80b04
EG
2975
2976 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2977
2978 int port = BP_PORT(bp);
2979 int reg_offset;
2980
2981 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2982 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2983
2984 val = REG_RD(bp, reg_offset);
2985 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2986 REG_WR(bp, reg_offset, val);
2987
2988 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2989 (attn & HW_INTERRUT_ASSERT_SET_2));
2990 bnx2x_panic();
2991 }
877e9aa4
ET
2992}
2993
2994static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2995{
34f80b04
EG
2996 u32 val;
2997
877e9aa4
ET
2998 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2999
34f80b04
EG
3000 if (attn & BNX2X_PMF_LINK_ASSERT) {
3001 int func = BP_FUNC(bp);
3002
3003 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3004 val = SHMEM_RD(bp, func_mb[func].drv_status);
3005 if (val & DRV_STATUS_DCC_EVENT_MASK)
3006 bnx2x_dcc_event(bp,
3007 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3008 bnx2x__link_status_update(bp);
2691d51d 3009 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3010 bnx2x_pmf_update(bp);
3011
3012 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3013
3014 BNX2X_ERR("MC assert!\n");
3015 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3016 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3017 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3018 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3019 bnx2x_panic();
3020
3021 } else if (attn & BNX2X_MCP_ASSERT) {
3022
3023 BNX2X_ERR("MCP assert!\n");
3024 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3025 bnx2x_fw_dump(bp);
877e9aa4
ET
3026
3027 } else
3028 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3029 }
3030
3031 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3032 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3033 if (attn & BNX2X_GRC_TIMEOUT) {
3034 val = CHIP_IS_E1H(bp) ?
3035 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3036 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3037 }
3038 if (attn & BNX2X_GRC_RSV) {
3039 val = CHIP_IS_E1H(bp) ?
3040 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3041 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3042 }
877e9aa4 3043 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3044 }
3045}
3046
3047static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3048{
a2fbb9ea
ET
3049 struct attn_route attn;
3050 struct attn_route group_mask;
34f80b04 3051 int port = BP_PORT(bp);
877e9aa4 3052 int index;
a2fbb9ea
ET
3053 u32 reg_addr;
3054 u32 val;
3fcaf2e5 3055 u32 aeu_mask;
a2fbb9ea
ET
3056
3057 /* need to take HW lock because MCP or other port might also
3058 try to handle this event */
4a37fb66 3059 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3060
3061 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3062 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3063 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3064 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3065 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3066 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3067
3068 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3069 if (deasserted & (1 << index)) {
3070 group_mask = bp->attn_group[index];
3071
34f80b04
EG
3072 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3073 index, group_mask.sig[0], group_mask.sig[1],
3074 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3075
877e9aa4
ET
3076 bnx2x_attn_int_deasserted3(bp,
3077 attn.sig[3] & group_mask.sig[3]);
3078 bnx2x_attn_int_deasserted1(bp,
3079 attn.sig[1] & group_mask.sig[1]);
3080 bnx2x_attn_int_deasserted2(bp,
3081 attn.sig[2] & group_mask.sig[2]);
3082 bnx2x_attn_int_deasserted0(bp,
3083 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3084
a2fbb9ea
ET
3085 if ((attn.sig[0] & group_mask.sig[0] &
3086 HW_PRTY_ASSERT_SET_0) ||
3087 (attn.sig[1] & group_mask.sig[1] &
3088 HW_PRTY_ASSERT_SET_1) ||
3089 (attn.sig[2] & group_mask.sig[2] &
3090 HW_PRTY_ASSERT_SET_2))
6378c025 3091 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3092 }
3093 }
3094
4a37fb66 3095 bnx2x_release_alr(bp);
a2fbb9ea 3096
5c862848 3097 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3098
3099 val = ~deasserted;
3fcaf2e5
EG
3100 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3101 val, reg_addr);
5c862848 3102 REG_WR(bp, reg_addr, val);
a2fbb9ea 3103
a2fbb9ea 3104 if (~bp->attn_state & deasserted)
3fcaf2e5 3105 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3106
3107 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3108 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3109
3fcaf2e5
EG
3110 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3111 aeu_mask = REG_RD(bp, reg_addr);
3112
3113 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3114 aeu_mask, deasserted);
3115 aeu_mask |= (deasserted & 0xff);
3116 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3117
3fcaf2e5
EG
3118 REG_WR(bp, reg_addr, aeu_mask);
3119 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3120
3121 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3122 bp->attn_state &= ~deasserted;
3123 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3124}
3125
3126static void bnx2x_attn_int(struct bnx2x *bp)
3127{
3128 /* read local copy of bits */
68d59484
EG
3129 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3130 attn_bits);
3131 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3132 attn_bits_ack);
a2fbb9ea
ET
3133 u32 attn_state = bp->attn_state;
3134
3135 /* look for changed bits */
3136 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3137 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3138
3139 DP(NETIF_MSG_HW,
3140 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3141 attn_bits, attn_ack, asserted, deasserted);
3142
3143 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3144 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3145
3146 /* handle bits that were raised */
3147 if (asserted)
3148 bnx2x_attn_int_asserted(bp, asserted);
3149
3150 if (deasserted)
3151 bnx2x_attn_int_deasserted(bp, deasserted);
3152}
3153
3154static void bnx2x_sp_task(struct work_struct *work)
3155{
1cf167f2 3156 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3157 u16 status;
3158
34f80b04 3159
a2fbb9ea
ET
3160 /* Return here if interrupt is disabled */
3161 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3162 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3163 return;
3164 }
3165
3166 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3167/* if (status == 0) */
3168/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3169
3196a88a 3170 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3171
877e9aa4
ET
3172 /* HW attentions */
3173 if (status & 0x1)
a2fbb9ea 3174 bnx2x_attn_int(bp);
a2fbb9ea 3175
68d59484 3176 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3177 IGU_INT_NOP, 1);
3178 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3179 IGU_INT_NOP, 1);
3180 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3181 IGU_INT_NOP, 1);
3182 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3183 IGU_INT_NOP, 1);
3184 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3185 IGU_INT_ENABLE, 1);
877e9aa4 3186
a2fbb9ea
ET
3187}
3188
3189static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3190{
3191 struct net_device *dev = dev_instance;
3192 struct bnx2x *bp = netdev_priv(dev);
3193
3194 /* Return here if interrupt is disabled */
3195 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3196 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3197 return IRQ_HANDLED;
3198 }
3199
8d9c5f34 3200 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3201
3202#ifdef BNX2X_STOP_ON_ERROR
3203 if (unlikely(bp->panic))
3204 return IRQ_HANDLED;
3205#endif
3206
1cf167f2 3207 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3208
3209 return IRQ_HANDLED;
3210}
3211
3212/* end of slow path */
3213
3214/* Statistics */
3215
3216/****************************************************************************
3217* Macros
3218****************************************************************************/
3219
a2fbb9ea
ET
3220/* sum[hi:lo] += add[hi:lo] */
3221#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3222 do { \
3223 s_lo += a_lo; \
f5ba6772 3224 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3225 } while (0)
3226
3227/* difference = minuend - subtrahend */
3228#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3229 do { \
bb2a0f7a
YG
3230 if (m_lo < s_lo) { \
3231 /* underflow */ \
a2fbb9ea 3232 d_hi = m_hi - s_hi; \
bb2a0f7a 3233 if (d_hi > 0) { \
6378c025 3234 /* we can 'loan' 1 */ \
a2fbb9ea
ET
3235 d_hi--; \
3236 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3237 } else { \
6378c025 3238 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3239 d_hi = 0; \
3240 d_lo = 0; \
3241 } \
bb2a0f7a
YG
3242 } else { \
3243 /* m_lo >= s_lo */ \
a2fbb9ea 3244 if (m_hi < s_hi) { \
bb2a0f7a
YG
3245 d_hi = 0; \
3246 d_lo = 0; \
3247 } else { \
6378c025 3248 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3249 d_hi = m_hi - s_hi; \
3250 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3251 } \
3252 } \
3253 } while (0)
3254
bb2a0f7a 3255#define UPDATE_STAT64(s, t) \
a2fbb9ea 3256 do { \
bb2a0f7a
YG
3257 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3258 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3259 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3260 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3261 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3262 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3263 } while (0)
3264
bb2a0f7a 3265#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3266 do { \
bb2a0f7a
YG
3267 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3268 diff.lo, new->s##_lo, old->s##_lo); \
3269 ADD_64(estats->t##_hi, diff.hi, \
3270 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3271 } while (0)
3272
3273/* sum[hi:lo] += add */
3274#define ADD_EXTEND_64(s_hi, s_lo, a) \
3275 do { \
3276 s_lo += a; \
3277 s_hi += (s_lo < a) ? 1 : 0; \
3278 } while (0)
3279
bb2a0f7a 3280#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3281 do { \
bb2a0f7a
YG
3282 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3283 pstats->mac_stx[1].s##_lo, \
3284 new->s); \
a2fbb9ea
ET
3285 } while (0)
3286
bb2a0f7a 3287#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3288 do { \
4781bfad
EG
3289 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3290 old_tclient->s = tclient->s; \
de832a55
EG
3291 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3292 } while (0)
3293
3294#define UPDATE_EXTEND_USTAT(s, t) \
3295 do { \
3296 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3297 old_uclient->s = uclient->s; \
3298 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3299 } while (0)
3300
3301#define UPDATE_EXTEND_XSTAT(s, t) \
3302 do { \
4781bfad
EG
3303 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3304 old_xclient->s = xclient->s; \
de832a55
EG
3305 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3306 } while (0)
3307
3308/* minuend -= subtrahend */
3309#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3310 do { \
3311 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3312 } while (0)
3313
3314/* minuend[hi:lo] -= subtrahend */
3315#define SUB_EXTEND_64(m_hi, m_lo, s) \
3316 do { \
3317 SUB_64(m_hi, 0, m_lo, s); \
3318 } while (0)
3319
3320#define SUB_EXTEND_USTAT(s, t) \
3321 do { \
3322 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3323 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3324 } while (0)
3325
3326/*
3327 * General service functions
3328 */
3329
3330static inline long bnx2x_hilo(u32 *hiref)
3331{
3332 u32 lo = *(hiref + 1);
3333#if (BITS_PER_LONG == 64)
3334 u32 hi = *hiref;
3335
3336 return HILO_U64(hi, lo);
3337#else
3338 return lo;
3339#endif
3340}
3341
3342/*
3343 * Init service functions
3344 */
3345
bb2a0f7a
YG
3346static void bnx2x_storm_stats_post(struct bnx2x *bp)
3347{
3348 if (!bp->stats_pending) {
3349 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3350 int i, rc;
bb2a0f7a
YG
3351
3352 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3353 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3354 for_each_queue(bp, i)
3355 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3356
3357 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3358 ((u32 *)&ramrod_data)[1],
3359 ((u32 *)&ramrod_data)[0], 0);
3360 if (rc == 0) {
3361 /* stats ramrod has it's own slot on the spq */
3362 bp->spq_left++;
3363 bp->stats_pending = 1;
3364 }
3365 }
3366}
3367
bb2a0f7a
YG
3368static void bnx2x_hw_stats_post(struct bnx2x *bp)
3369{
3370 struct dmae_command *dmae = &bp->stats_dmae;
3371 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3372
3373 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3374 if (CHIP_REV_IS_SLOW(bp))
3375 return;
bb2a0f7a
YG
3376
3377 /* loader */
3378 if (bp->executer_idx) {
3379 int loader_idx = PMF_DMAE_C(bp);
3380
3381 memset(dmae, 0, sizeof(struct dmae_command));
3382
3383 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3384 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3385 DMAE_CMD_DST_RESET |
3386#ifdef __BIG_ENDIAN
3387 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3388#else
3389 DMAE_CMD_ENDIANITY_DW_SWAP |
3390#endif
3391 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3392 DMAE_CMD_PORT_0) |
3393 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3394 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3395 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3396 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3397 sizeof(struct dmae_command) *
3398 (loader_idx + 1)) >> 2;
3399 dmae->dst_addr_hi = 0;
3400 dmae->len = sizeof(struct dmae_command) >> 2;
3401 if (CHIP_IS_E1(bp))
3402 dmae->len--;
3403 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3404 dmae->comp_addr_hi = 0;
3405 dmae->comp_val = 1;
3406
3407 *stats_comp = 0;
3408 bnx2x_post_dmae(bp, dmae, loader_idx);
3409
3410 } else if (bp->func_stx) {
3411 *stats_comp = 0;
3412 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3413 }
3414}
3415
3416static int bnx2x_stats_comp(struct bnx2x *bp)
3417{
3418 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3419 int cnt = 10;
3420
3421 might_sleep();
3422 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3423 if (!cnt) {
3424 BNX2X_ERR("timeout waiting for stats finished\n");
3425 break;
3426 }
3427 cnt--;
12469401 3428 msleep(1);
bb2a0f7a
YG
3429 }
3430 return 1;
3431}
3432
3433/*
3434 * Statistics service functions
3435 */
3436
3437static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3438{
3439 struct dmae_command *dmae;
3440 u32 opcode;
3441 int loader_idx = PMF_DMAE_C(bp);
3442 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3443
3444 /* sanity */
3445 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3446 BNX2X_ERR("BUG!\n");
3447 return;
3448 }
3449
3450 bp->executer_idx = 0;
3451
3452 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3453 DMAE_CMD_C_ENABLE |
3454 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3455#ifdef __BIG_ENDIAN
3456 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3457#else
3458 DMAE_CMD_ENDIANITY_DW_SWAP |
3459#endif
3460 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3461 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3462
3463 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3464 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3465 dmae->src_addr_lo = bp->port.port_stx >> 2;
3466 dmae->src_addr_hi = 0;
3467 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3468 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3469 dmae->len = DMAE_LEN32_RD_MAX;
3470 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3471 dmae->comp_addr_hi = 0;
3472 dmae->comp_val = 1;
3473
3474 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3475 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3476 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3477 dmae->src_addr_hi = 0;
7a9b2557
VZ
3478 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3479 DMAE_LEN32_RD_MAX * 4);
3480 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3481 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3482 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3483 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3484 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3485 dmae->comp_val = DMAE_COMP_VAL;
3486
3487 *stats_comp = 0;
3488 bnx2x_hw_stats_post(bp);
3489 bnx2x_stats_comp(bp);
3490}
3491
3492static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3493{
3494 struct dmae_command *dmae;
34f80b04 3495 int port = BP_PORT(bp);
bb2a0f7a 3496 int vn = BP_E1HVN(bp);
a2fbb9ea 3497 u32 opcode;
bb2a0f7a 3498 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3499 u32 mac_addr;
bb2a0f7a
YG
3500 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3501
3502 /* sanity */
3503 if (!bp->link_vars.link_up || !bp->port.pmf) {
3504 BNX2X_ERR("BUG!\n");
3505 return;
3506 }
a2fbb9ea
ET
3507
3508 bp->executer_idx = 0;
bb2a0f7a
YG
3509
3510 /* MCP */
3511 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3512 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3513 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3514#ifdef __BIG_ENDIAN
bb2a0f7a 3515 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3516#else
bb2a0f7a 3517 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3518#endif
bb2a0f7a
YG
3519 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3520 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3521
bb2a0f7a 3522 if (bp->port.port_stx) {
a2fbb9ea
ET
3523
3524 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3525 dmae->opcode = opcode;
bb2a0f7a
YG
3526 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3527 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3528 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3529 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3530 dmae->len = sizeof(struct host_port_stats) >> 2;
3531 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3532 dmae->comp_addr_hi = 0;
3533 dmae->comp_val = 1;
a2fbb9ea
ET
3534 }
3535
bb2a0f7a
YG
3536 if (bp->func_stx) {
3537
3538 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3539 dmae->opcode = opcode;
3540 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3541 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3542 dmae->dst_addr_lo = bp->func_stx >> 2;
3543 dmae->dst_addr_hi = 0;
3544 dmae->len = sizeof(struct host_func_stats) >> 2;
3545 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3546 dmae->comp_addr_hi = 0;
3547 dmae->comp_val = 1;
a2fbb9ea
ET
3548 }
3549
bb2a0f7a 3550 /* MAC */
a2fbb9ea
ET
3551 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3552 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3553 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3554#ifdef __BIG_ENDIAN
3555 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3556#else
3557 DMAE_CMD_ENDIANITY_DW_SWAP |
3558#endif
bb2a0f7a
YG
3559 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3560 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3561
c18487ee 3562 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3563
3564 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3565 NIG_REG_INGRESS_BMAC0_MEM);
3566
3567 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3568 BIGMAC_REGISTER_TX_STAT_GTBYT */
3569 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3570 dmae->opcode = opcode;
3571 dmae->src_addr_lo = (mac_addr +
3572 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3573 dmae->src_addr_hi = 0;
3574 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3575 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3576 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3577 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3578 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3579 dmae->comp_addr_hi = 0;
3580 dmae->comp_val = 1;
3581
3582 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3583 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3584 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3585 dmae->opcode = opcode;
3586 dmae->src_addr_lo = (mac_addr +
3587 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3588 dmae->src_addr_hi = 0;
3589 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3590 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3591 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3592 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3593 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3594 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3595 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3596 dmae->comp_addr_hi = 0;
3597 dmae->comp_val = 1;
3598
c18487ee 3599 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3600
3601 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3602
3603 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3604 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3605 dmae->opcode = opcode;
3606 dmae->src_addr_lo = (mac_addr +
3607 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3608 dmae->src_addr_hi = 0;
3609 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3610 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3611 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3612 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3613 dmae->comp_addr_hi = 0;
3614 dmae->comp_val = 1;
3615
3616 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3617 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3618 dmae->opcode = opcode;
3619 dmae->src_addr_lo = (mac_addr +
3620 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3621 dmae->src_addr_hi = 0;
3622 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3623 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3624 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3625 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3626 dmae->len = 1;
3627 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3628 dmae->comp_addr_hi = 0;
3629 dmae->comp_val = 1;
3630
3631 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3632 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3633 dmae->opcode = opcode;
3634 dmae->src_addr_lo = (mac_addr +
3635 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3636 dmae->src_addr_hi = 0;
3637 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3638 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3639 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3640 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3641 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3642 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3643 dmae->comp_addr_hi = 0;
3644 dmae->comp_val = 1;
3645 }
3646
3647 /* NIG */
bb2a0f7a
YG
3648 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3649 dmae->opcode = opcode;
3650 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3651 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3652 dmae->src_addr_hi = 0;
3653 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3654 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3655 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3656 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3657 dmae->comp_addr_hi = 0;
3658 dmae->comp_val = 1;
3659
3660 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3661 dmae->opcode = opcode;
3662 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3663 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3664 dmae->src_addr_hi = 0;
3665 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3666 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3667 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3668 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3669 dmae->len = (2*sizeof(u32)) >> 2;
3670 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3671 dmae->comp_addr_hi = 0;
3672 dmae->comp_val = 1;
3673
a2fbb9ea
ET
3674 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3675 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3676 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3677 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3678#ifdef __BIG_ENDIAN
3679 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3680#else
3681 DMAE_CMD_ENDIANITY_DW_SWAP |
3682#endif
bb2a0f7a
YG
3683 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3684 (vn << DMAE_CMD_E1HVN_SHIFT));
3685 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3686 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3687 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3688 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3689 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3690 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3691 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3692 dmae->len = (2*sizeof(u32)) >> 2;
3693 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3694 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3695 dmae->comp_val = DMAE_COMP_VAL;
3696
3697 *stats_comp = 0;
a2fbb9ea
ET
3698}
3699
bb2a0f7a 3700static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3701{
bb2a0f7a
YG
3702 struct dmae_command *dmae = &bp->stats_dmae;
3703 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3704
bb2a0f7a
YG
3705 /* sanity */
3706 if (!bp->func_stx) {
3707 BNX2X_ERR("BUG!\n");
3708 return;
3709 }
a2fbb9ea 3710
bb2a0f7a
YG
3711 bp->executer_idx = 0;
3712 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3713
bb2a0f7a
YG
3714 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3715 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3716 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3717#ifdef __BIG_ENDIAN
3718 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3719#else
3720 DMAE_CMD_ENDIANITY_DW_SWAP |
3721#endif
3722 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3723 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3724 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3725 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3726 dmae->dst_addr_lo = bp->func_stx >> 2;
3727 dmae->dst_addr_hi = 0;
3728 dmae->len = sizeof(struct host_func_stats) >> 2;
3729 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3730 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3731 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3732
bb2a0f7a
YG
3733 *stats_comp = 0;
3734}
a2fbb9ea 3735
bb2a0f7a
YG
3736static void bnx2x_stats_start(struct bnx2x *bp)
3737{
3738 if (bp->port.pmf)
3739 bnx2x_port_stats_init(bp);
3740
3741 else if (bp->func_stx)
3742 bnx2x_func_stats_init(bp);
3743
3744 bnx2x_hw_stats_post(bp);
3745 bnx2x_storm_stats_post(bp);
3746}
3747
3748static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3749{
3750 bnx2x_stats_comp(bp);
3751 bnx2x_stats_pmf_update(bp);
3752 bnx2x_stats_start(bp);
3753}
3754
3755static void bnx2x_stats_restart(struct bnx2x *bp)
3756{
3757 bnx2x_stats_comp(bp);
3758 bnx2x_stats_start(bp);
3759}
3760
3761static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3762{
3763 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3764 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3765 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3766 struct {
3767 u32 lo;
3768 u32 hi;
3769 } diff;
bb2a0f7a
YG
3770
3771 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3772 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3773 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3774 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3775 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3776 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3777 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3778 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3779 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3780 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3781 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3782 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3783 UPDATE_STAT64(tx_stat_gt127,
3784 tx_stat_etherstatspkts65octetsto127octets);
3785 UPDATE_STAT64(tx_stat_gt255,
3786 tx_stat_etherstatspkts128octetsto255octets);
3787 UPDATE_STAT64(tx_stat_gt511,
3788 tx_stat_etherstatspkts256octetsto511octets);
3789 UPDATE_STAT64(tx_stat_gt1023,
3790 tx_stat_etherstatspkts512octetsto1023octets);
3791 UPDATE_STAT64(tx_stat_gt1518,
3792 tx_stat_etherstatspkts1024octetsto1522octets);
3793 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3794 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3795 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3796 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3797 UPDATE_STAT64(tx_stat_gterr,
3798 tx_stat_dot3statsinternalmactransmiterrors);
3799 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3800
3801 estats->pause_frames_received_hi =
3802 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3803 estats->pause_frames_received_lo =
3804 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3805
3806 estats->pause_frames_sent_hi =
3807 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3808 estats->pause_frames_sent_lo =
3809 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3810}
3811
3812static void bnx2x_emac_stats_update(struct bnx2x *bp)
3813{
3814 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3815 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3816 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3817
3818 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3819 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3820 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3821 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3822 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3823 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3824 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3825 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3826 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3827 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3828 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3829 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3830 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3831 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3832 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3833 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3834 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3835 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3836 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3837 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3838 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3839 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3840 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3841 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3842 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3843 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3844 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3845 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3846 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3847 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3848 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3849
3850 estats->pause_frames_received_hi =
3851 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3852 estats->pause_frames_received_lo =
3853 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3854 ADD_64(estats->pause_frames_received_hi,
3855 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3856 estats->pause_frames_received_lo,
3857 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3858
3859 estats->pause_frames_sent_hi =
3860 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3861 estats->pause_frames_sent_lo =
3862 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3863 ADD_64(estats->pause_frames_sent_hi,
3864 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3865 estats->pause_frames_sent_lo,
3866 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3867}
3868
3869static int bnx2x_hw_stats_update(struct bnx2x *bp)
3870{
3871 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3872 struct nig_stats *old = &(bp->port.old_nig_stats);
3873 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3874 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4781bfad
EG
3875 struct {
3876 u32 lo;
3877 u32 hi;
3878 } diff;
de832a55 3879 u32 nig_timer_max;
bb2a0f7a
YG
3880
3881 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3882 bnx2x_bmac_stats_update(bp);
3883
3884 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3885 bnx2x_emac_stats_update(bp);
3886
3887 else { /* unreached */
c3eefaf6 3888 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
bb2a0f7a
YG
3889 return -1;
3890 }
a2fbb9ea 3891
bb2a0f7a
YG
3892 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3893 new->brb_discard - old->brb_discard);
66e855f3
YG
3894 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3895 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3896
bb2a0f7a
YG
3897 UPDATE_STAT64_NIG(egress_mac_pkt0,
3898 etherstatspkts1024octetsto1522octets);
3899 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3900
bb2a0f7a 3901 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3902
bb2a0f7a
YG
3903 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3904 sizeof(struct mac_stx));
3905 estats->brb_drop_hi = pstats->brb_drop_hi;
3906 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3907
bb2a0f7a 3908 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3909
de832a55
EG
3910 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3911 if (nig_timer_max != estats->nig_timer_max) {
3912 estats->nig_timer_max = nig_timer_max;
3913 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3914 }
3915
bb2a0f7a 3916 return 0;
a2fbb9ea
ET
3917}
3918
bb2a0f7a 3919static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3920{
3921 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3922 struct tstorm_per_port_stats *tport =
de832a55 3923 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3924 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3925 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3926 int i;
3927
6fe49bb9
EG
3928 memcpy(&(fstats->total_bytes_received_hi),
3929 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
de832a55
EG
3930 sizeof(struct host_func_stats) - 2*sizeof(u32));
3931 estats->error_bytes_received_hi = 0;
3932 estats->error_bytes_received_lo = 0;
3933 estats->etherstatsoverrsizepkts_hi = 0;
3934 estats->etherstatsoverrsizepkts_lo = 0;
3935 estats->no_buff_discard_hi = 0;
3936 estats->no_buff_discard_lo = 0;
a2fbb9ea 3937
ca00392c 3938 for_each_rx_queue(bp, i) {
de832a55
EG
3939 struct bnx2x_fastpath *fp = &bp->fp[i];
3940 int cl_id = fp->cl_id;
3941 struct tstorm_per_client_stats *tclient =
3942 &stats->tstorm_common.client_statistics[cl_id];
3943 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3944 struct ustorm_per_client_stats *uclient =
3945 &stats->ustorm_common.client_statistics[cl_id];
3946 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3947 struct xstorm_per_client_stats *xclient =
3948 &stats->xstorm_common.client_statistics[cl_id];
3949 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3950 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3951 u32 diff;
3952
3953 /* are storm stats valid? */
3954 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3955 bp->stats_counter) {
de832a55
EG
3956 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3957 " xstorm counter (%d) != stats_counter (%d)\n",
3958 i, xclient->stats_counter, bp->stats_counter);
3959 return -1;
3960 }
3961 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3962 bp->stats_counter) {
de832a55
EG
3963 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3964 " tstorm counter (%d) != stats_counter (%d)\n",
3965 i, tclient->stats_counter, bp->stats_counter);
3966 return -2;
3967 }
3968 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3969 bp->stats_counter) {
3970 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3971 " ustorm counter (%d) != stats_counter (%d)\n",
3972 i, uclient->stats_counter, bp->stats_counter);
3973 return -4;
3974 }
a2fbb9ea 3975
de832a55 3976 qstats->total_bytes_received_hi =
ca00392c 3977 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 3978 qstats->total_bytes_received_lo =
ca00392c
EG
3979 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3980
3981 ADD_64(qstats->total_bytes_received_hi,
3982 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3983 qstats->total_bytes_received_lo,
3984 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3985
3986 ADD_64(qstats->total_bytes_received_hi,
3987 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3988 qstats->total_bytes_received_lo,
3989 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3990
3991 qstats->valid_bytes_received_hi =
3992 qstats->total_bytes_received_hi;
de832a55 3993 qstats->valid_bytes_received_lo =
ca00392c 3994 qstats->total_bytes_received_lo;
bb2a0f7a 3995
de832a55 3996 qstats->error_bytes_received_hi =
bb2a0f7a 3997 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3998 qstats->error_bytes_received_lo =
bb2a0f7a 3999 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4000
de832a55
EG
4001 ADD_64(qstats->total_bytes_received_hi,
4002 qstats->error_bytes_received_hi,
4003 qstats->total_bytes_received_lo,
4004 qstats->error_bytes_received_lo);
4005
4006 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4007 total_unicast_packets_received);
4008 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4009 total_multicast_packets_received);
4010 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4011 total_broadcast_packets_received);
4012 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4013 etherstatsoverrsizepkts);
4014 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4015
4016 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4017 total_unicast_packets_received);
4018 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4019 total_multicast_packets_received);
4020 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4021 total_broadcast_packets_received);
4022 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4023 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4024 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4025
4026 qstats->total_bytes_transmitted_hi =
ca00392c 4027 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4028 qstats->total_bytes_transmitted_lo =
ca00392c
EG
4029 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4030
4031 ADD_64(qstats->total_bytes_transmitted_hi,
4032 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4033 qstats->total_bytes_transmitted_lo,
4034 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4035
4036 ADD_64(qstats->total_bytes_transmitted_hi,
4037 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4038 qstats->total_bytes_transmitted_lo,
4039 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4040
de832a55
EG
4041 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4042 total_unicast_packets_transmitted);
4043 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4044 total_multicast_packets_transmitted);
4045 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4046 total_broadcast_packets_transmitted);
4047
4048 old_tclient->checksum_discard = tclient->checksum_discard;
4049 old_tclient->ttl0_discard = tclient->ttl0_discard;
4050
4051 ADD_64(fstats->total_bytes_received_hi,
4052 qstats->total_bytes_received_hi,
4053 fstats->total_bytes_received_lo,
4054 qstats->total_bytes_received_lo);
4055 ADD_64(fstats->total_bytes_transmitted_hi,
4056 qstats->total_bytes_transmitted_hi,
4057 fstats->total_bytes_transmitted_lo,
4058 qstats->total_bytes_transmitted_lo);
4059 ADD_64(fstats->total_unicast_packets_received_hi,
4060 qstats->total_unicast_packets_received_hi,
4061 fstats->total_unicast_packets_received_lo,
4062 qstats->total_unicast_packets_received_lo);
4063 ADD_64(fstats->total_multicast_packets_received_hi,
4064 qstats->total_multicast_packets_received_hi,
4065 fstats->total_multicast_packets_received_lo,
4066 qstats->total_multicast_packets_received_lo);
4067 ADD_64(fstats->total_broadcast_packets_received_hi,
4068 qstats->total_broadcast_packets_received_hi,
4069 fstats->total_broadcast_packets_received_lo,
4070 qstats->total_broadcast_packets_received_lo);
4071 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4072 qstats->total_unicast_packets_transmitted_hi,
4073 fstats->total_unicast_packets_transmitted_lo,
4074 qstats->total_unicast_packets_transmitted_lo);
4075 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4076 qstats->total_multicast_packets_transmitted_hi,
4077 fstats->total_multicast_packets_transmitted_lo,
4078 qstats->total_multicast_packets_transmitted_lo);
4079 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4080 qstats->total_broadcast_packets_transmitted_hi,
4081 fstats->total_broadcast_packets_transmitted_lo,
4082 qstats->total_broadcast_packets_transmitted_lo);
4083 ADD_64(fstats->valid_bytes_received_hi,
4084 qstats->valid_bytes_received_hi,
4085 fstats->valid_bytes_received_lo,
4086 qstats->valid_bytes_received_lo);
4087
4088 ADD_64(estats->error_bytes_received_hi,
4089 qstats->error_bytes_received_hi,
4090 estats->error_bytes_received_lo,
4091 qstats->error_bytes_received_lo);
4092 ADD_64(estats->etherstatsoverrsizepkts_hi,
4093 qstats->etherstatsoverrsizepkts_hi,
4094 estats->etherstatsoverrsizepkts_lo,
4095 qstats->etherstatsoverrsizepkts_lo);
4096 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4097 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4098 }
4099
4100 ADD_64(fstats->total_bytes_received_hi,
4101 estats->rx_stat_ifhcinbadoctets_hi,
4102 fstats->total_bytes_received_lo,
4103 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
4104
4105 memcpy(estats, &(fstats->total_bytes_received_hi),
4106 sizeof(struct host_func_stats) - 2*sizeof(u32));
4107
de832a55
EG
4108 ADD_64(estats->etherstatsoverrsizepkts_hi,
4109 estats->rx_stat_dot3statsframestoolong_hi,
4110 estats->etherstatsoverrsizepkts_lo,
4111 estats->rx_stat_dot3statsframestoolong_lo);
4112 ADD_64(estats->error_bytes_received_hi,
4113 estats->rx_stat_ifhcinbadoctets_hi,
4114 estats->error_bytes_received_lo,
4115 estats->rx_stat_ifhcinbadoctets_lo);
4116
4117 if (bp->port.pmf) {
4118 estats->mac_filter_discard =
4119 le32_to_cpu(tport->mac_filter_discard);
4120 estats->xxoverflow_discard =
4121 le32_to_cpu(tport->xxoverflow_discard);
4122 estats->brb_truncate_discard =
bb2a0f7a 4123 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
4124 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4125 }
bb2a0f7a
YG
4126
4127 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4128
de832a55
EG
4129 bp->stats_pending = 0;
4130
a2fbb9ea
ET
4131 return 0;
4132}
4133
bb2a0f7a 4134static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4135{
bb2a0f7a 4136 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4137 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4138 int i;
a2fbb9ea
ET
4139
4140 nstats->rx_packets =
4141 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4142 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4143 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4144
4145 nstats->tx_packets =
4146 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4147 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4148 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4149
de832a55 4150 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4151
0e39e645 4152 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4153
de832a55 4154 nstats->rx_dropped = estats->mac_discard;
ca00392c 4155 for_each_rx_queue(bp, i)
de832a55
EG
4156 nstats->rx_dropped +=
4157 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4158
a2fbb9ea
ET
4159 nstats->tx_dropped = 0;
4160
4161 nstats->multicast =
de832a55 4162 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4163
bb2a0f7a 4164 nstats->collisions =
de832a55 4165 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
4166
4167 nstats->rx_length_errors =
de832a55
EG
4168 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4169 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4170 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4171 bnx2x_hilo(&estats->brb_truncate_hi);
4172 nstats->rx_crc_errors =
4173 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4174 nstats->rx_frame_errors =
4175 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4176 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
4177 nstats->rx_missed_errors = estats->xxoverflow_discard;
4178
4179 nstats->rx_errors = nstats->rx_length_errors +
4180 nstats->rx_over_errors +
4181 nstats->rx_crc_errors +
4182 nstats->rx_frame_errors +
0e39e645
ET
4183 nstats->rx_fifo_errors +
4184 nstats->rx_missed_errors;
a2fbb9ea 4185
bb2a0f7a 4186 nstats->tx_aborted_errors =
de832a55
EG
4187 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4188 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4189 nstats->tx_carrier_errors =
4190 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
4191 nstats->tx_fifo_errors = 0;
4192 nstats->tx_heartbeat_errors = 0;
4193 nstats->tx_window_errors = 0;
4194
4195 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
4196 nstats->tx_carrier_errors +
4197 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4198}
4199
4200static void bnx2x_drv_stats_update(struct bnx2x *bp)
4201{
4202 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4203 int i;
4204
4205 estats->driver_xoff = 0;
4206 estats->rx_err_discard_pkt = 0;
4207 estats->rx_skb_alloc_failed = 0;
4208 estats->hw_csum_err = 0;
ca00392c 4209 for_each_rx_queue(bp, i) {
de832a55
EG
4210 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4211
4212 estats->driver_xoff += qstats->driver_xoff;
4213 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4214 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4215 estats->hw_csum_err += qstats->hw_csum_err;
4216 }
a2fbb9ea
ET
4217}
4218
bb2a0f7a 4219static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4220{
bb2a0f7a 4221 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4222
bb2a0f7a
YG
4223 if (*stats_comp != DMAE_COMP_VAL)
4224 return;
4225
4226 if (bp->port.pmf)
de832a55 4227 bnx2x_hw_stats_update(bp);
a2fbb9ea 4228
de832a55
EG
4229 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4230 BNX2X_ERR("storm stats were not updated for 3 times\n");
4231 bnx2x_panic();
4232 return;
a2fbb9ea
ET
4233 }
4234
de832a55
EG
4235 bnx2x_net_stats_update(bp);
4236 bnx2x_drv_stats_update(bp);
4237
a2fbb9ea 4238 if (bp->msglevel & NETIF_MSG_TIMER) {
ca00392c
EG
4239 struct bnx2x_fastpath *fp0_rx = bp->fp;
4240 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
de832a55
EG
4241 struct tstorm_per_client_stats *old_tclient =
4242 &bp->fp->old_tclient;
4243 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4244 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4245 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4246 int i;
a2fbb9ea
ET
4247
4248 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4249 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4250 " tx pkt (%lx)\n",
ca00392c
EG
4251 bnx2x_tx_avail(fp0_tx),
4252 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
4253 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4254 " rx pkt (%lx)\n",
ca00392c
EG
4255 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4256 fp0_rx->rx_comp_cons),
4257 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
de832a55
EG
4258 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4259 "brb truncate %u\n",
4260 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4261 qstats->driver_xoff,
4262 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4263 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4264 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
4265 "mac_discard %u mac_filter_discard %u "
4266 "xxovrflow_discard %u brb_truncate_discard %u "
4267 "ttl0_discard %u\n",
4781bfad 4268 le32_to_cpu(old_tclient->checksum_discard),
de832a55
EG
4269 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4270 bnx2x_hilo(&qstats->no_buff_discard_hi),
4271 estats->mac_discard, estats->mac_filter_discard,
4272 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4273 le32_to_cpu(old_tclient->ttl0_discard));
a2fbb9ea
ET
4274
4275 for_each_queue(bp, i) {
4276 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4277 bnx2x_fp(bp, i, tx_pkt),
4278 bnx2x_fp(bp, i, rx_pkt),
4279 bnx2x_fp(bp, i, rx_calls));
4280 }
4281 }
4282
bb2a0f7a
YG
4283 bnx2x_hw_stats_post(bp);
4284 bnx2x_storm_stats_post(bp);
4285}
a2fbb9ea 4286
bb2a0f7a
YG
4287static void bnx2x_port_stats_stop(struct bnx2x *bp)
4288{
4289 struct dmae_command *dmae;
4290 u32 opcode;
4291 int loader_idx = PMF_DMAE_C(bp);
4292 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4293
bb2a0f7a 4294 bp->executer_idx = 0;
a2fbb9ea 4295
bb2a0f7a
YG
4296 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4297 DMAE_CMD_C_ENABLE |
4298 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4299#ifdef __BIG_ENDIAN
bb2a0f7a 4300 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4301#else
bb2a0f7a 4302 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4303#endif
bb2a0f7a
YG
4304 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4305 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4306
4307 if (bp->port.port_stx) {
4308
4309 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4310 if (bp->func_stx)
4311 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4312 else
4313 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4314 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4315 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4316 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4317 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4318 dmae->len = sizeof(struct host_port_stats) >> 2;
4319 if (bp->func_stx) {
4320 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4321 dmae->comp_addr_hi = 0;
4322 dmae->comp_val = 1;
4323 } else {
4324 dmae->comp_addr_lo =
4325 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4326 dmae->comp_addr_hi =
4327 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4328 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4329
bb2a0f7a
YG
4330 *stats_comp = 0;
4331 }
a2fbb9ea
ET
4332 }
4333
bb2a0f7a
YG
4334 if (bp->func_stx) {
4335
4336 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4337 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4338 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4339 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4340 dmae->dst_addr_lo = bp->func_stx >> 2;
4341 dmae->dst_addr_hi = 0;
4342 dmae->len = sizeof(struct host_func_stats) >> 2;
4343 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4344 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4345 dmae->comp_val = DMAE_COMP_VAL;
4346
4347 *stats_comp = 0;
a2fbb9ea 4348 }
bb2a0f7a
YG
4349}
4350
4351static void bnx2x_stats_stop(struct bnx2x *bp)
4352{
4353 int update = 0;
4354
4355 bnx2x_stats_comp(bp);
4356
4357 if (bp->port.pmf)
4358 update = (bnx2x_hw_stats_update(bp) == 0);
4359
4360 update |= (bnx2x_storm_stats_update(bp) == 0);
4361
4362 if (update) {
4363 bnx2x_net_stats_update(bp);
a2fbb9ea 4364
bb2a0f7a
YG
4365 if (bp->port.pmf)
4366 bnx2x_port_stats_stop(bp);
4367
4368 bnx2x_hw_stats_post(bp);
4369 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4370 }
4371}
4372
bb2a0f7a
YG
4373static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4374{
4375}
4376
4377static const struct {
4378 void (*action)(struct bnx2x *bp);
4379 enum bnx2x_stats_state next_state;
4380} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4381/* state event */
4382{
4383/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4384/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4385/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4386/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4387},
4388{
4389/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4390/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4391/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4392/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4393}
4394};
4395
4396static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4397{
4398 enum bnx2x_stats_state state = bp->stats_state;
4399
4400 bnx2x_stats_stm[state][event].action(bp);
4401 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4402
4403 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4404 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4405 state, event, bp->stats_state);
4406}
4407
6fe49bb9
EG
4408static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4409{
4410 struct dmae_command *dmae;
4411 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4412
4413 /* sanity */
4414 if (!bp->port.pmf || !bp->port.port_stx) {
4415 BNX2X_ERR("BUG!\n");
4416 return;
4417 }
4418
4419 bp->executer_idx = 0;
4420
4421 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4422 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4423 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4424 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4425#ifdef __BIG_ENDIAN
4426 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4427#else
4428 DMAE_CMD_ENDIANITY_DW_SWAP |
4429#endif
4430 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4431 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4432 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4433 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4434 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4435 dmae->dst_addr_hi = 0;
4436 dmae->len = sizeof(struct host_port_stats) >> 2;
4437 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4438 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4439 dmae->comp_val = DMAE_COMP_VAL;
4440
4441 *stats_comp = 0;
4442 bnx2x_hw_stats_post(bp);
4443 bnx2x_stats_comp(bp);
4444}
4445
4446static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4447{
4448 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4449 int port = BP_PORT(bp);
4450 int func;
4451 u32 func_stx;
4452
4453 /* sanity */
4454 if (!bp->port.pmf || !bp->func_stx) {
4455 BNX2X_ERR("BUG!\n");
4456 return;
4457 }
4458
4459 /* save our func_stx */
4460 func_stx = bp->func_stx;
4461
4462 for (vn = VN_0; vn < vn_max; vn++) {
4463 func = 2*vn + port;
4464
4465 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4466 bnx2x_func_stats_init(bp);
4467 bnx2x_hw_stats_post(bp);
4468 bnx2x_stats_comp(bp);
4469 }
4470
4471 /* restore our func_stx */
4472 bp->func_stx = func_stx;
4473}
4474
4475static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4476{
4477 struct dmae_command *dmae = &bp->stats_dmae;
4478 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4479
4480 /* sanity */
4481 if (!bp->func_stx) {
4482 BNX2X_ERR("BUG!\n");
4483 return;
4484 }
4485
4486 bp->executer_idx = 0;
4487 memset(dmae, 0, sizeof(struct dmae_command));
4488
4489 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4490 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4491 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4492#ifdef __BIG_ENDIAN
4493 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4494#else
4495 DMAE_CMD_ENDIANITY_DW_SWAP |
4496#endif
4497 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4498 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4499 dmae->src_addr_lo = bp->func_stx >> 2;
4500 dmae->src_addr_hi = 0;
4501 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4502 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4503 dmae->len = sizeof(struct host_func_stats) >> 2;
4504 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4505 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4506 dmae->comp_val = DMAE_COMP_VAL;
4507
4508 *stats_comp = 0;
4509 bnx2x_hw_stats_post(bp);
4510 bnx2x_stats_comp(bp);
4511}
4512
4513static void bnx2x_stats_init(struct bnx2x *bp)
4514{
4515 int port = BP_PORT(bp);
4516 int func = BP_FUNC(bp);
4517 int i;
4518
4519 bp->stats_pending = 0;
4520 bp->executer_idx = 0;
4521 bp->stats_counter = 0;
4522
4523 /* port and func stats for management */
4524 if (!BP_NOMCP(bp)) {
4525 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4526 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4527
4528 } else {
4529 bp->port.port_stx = 0;
4530 bp->func_stx = 0;
4531 }
4532 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4533 bp->port.port_stx, bp->func_stx);
4534
4535 /* port stats */
4536 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4537 bp->port.old_nig_stats.brb_discard =
4538 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4539 bp->port.old_nig_stats.brb_truncate =
4540 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4541 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4542 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4543 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4544 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4545
4546 /* function stats */
4547 for_each_queue(bp, i) {
4548 struct bnx2x_fastpath *fp = &bp->fp[i];
4549
4550 memset(&fp->old_tclient, 0,
4551 sizeof(struct tstorm_per_client_stats));
4552 memset(&fp->old_uclient, 0,
4553 sizeof(struct ustorm_per_client_stats));
4554 memset(&fp->old_xclient, 0,
4555 sizeof(struct xstorm_per_client_stats));
4556 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4557 }
4558
4559 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4560 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4561
4562 bp->stats_state = STATS_STATE_DISABLED;
4563
4564 if (bp->port.pmf) {
4565 if (bp->port.port_stx)
4566 bnx2x_port_stats_base_init(bp);
4567
4568 if (bp->func_stx)
4569 bnx2x_func_stats_base_init(bp);
4570
4571 } else if (bp->func_stx)
4572 bnx2x_func_stats_base_update(bp);
4573}
4574
a2fbb9ea
ET
4575static void bnx2x_timer(unsigned long data)
4576{
4577 struct bnx2x *bp = (struct bnx2x *) data;
4578
4579 if (!netif_running(bp->dev))
4580 return;
4581
4582 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4583 goto timer_restart;
a2fbb9ea
ET
4584
4585 if (poll) {
4586 struct bnx2x_fastpath *fp = &bp->fp[0];
4587 int rc;
4588
7961f791 4589 bnx2x_tx_int(fp);
a2fbb9ea
ET
4590 rc = bnx2x_rx_int(fp, 1000);
4591 }
4592
34f80b04
EG
4593 if (!BP_NOMCP(bp)) {
4594 int func = BP_FUNC(bp);
a2fbb9ea
ET
4595 u32 drv_pulse;
4596 u32 mcp_pulse;
4597
4598 ++bp->fw_drv_pulse_wr_seq;
4599 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4600 /* TBD - add SYSTEM_TIME */
4601 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4602 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4603
34f80b04 4604 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4605 MCP_PULSE_SEQ_MASK);
4606 /* The delta between driver pulse and mcp response
4607 * should be 1 (before mcp response) or 0 (after mcp response)
4608 */
4609 if ((drv_pulse != mcp_pulse) &&
4610 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4611 /* someone lost a heartbeat... */
4612 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4613 drv_pulse, mcp_pulse);
4614 }
4615 }
4616
bb2a0f7a
YG
4617 if ((bp->state == BNX2X_STATE_OPEN) ||
4618 (bp->state == BNX2X_STATE_DISABLED))
4619 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4620
f1410647 4621timer_restart:
a2fbb9ea
ET
4622 mod_timer(&bp->timer, jiffies + bp->current_interval);
4623}
4624
4625/* end of Statistics */
4626
4627/* nic init */
4628
4629/*
4630 * nic init service functions
4631 */
4632
34f80b04 4633static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4634{
34f80b04
EG
4635 int port = BP_PORT(bp);
4636
ca00392c
EG
4637 /* "CSTORM" */
4638 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4639 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4640 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4641 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4642 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4643 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
34f80b04
EG
4644}
4645
5c862848
EG
4646static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4647 dma_addr_t mapping, int sb_id)
34f80b04
EG
4648{
4649 int port = BP_PORT(bp);
bb2a0f7a 4650 int func = BP_FUNC(bp);
a2fbb9ea 4651 int index;
34f80b04 4652 u64 section;
a2fbb9ea
ET
4653
4654 /* USTORM */
4655 section = ((u64)mapping) + offsetof(struct host_status_block,
4656 u_status_block);
34f80b04 4657 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea 4658
ca00392c
EG
4659 REG_WR(bp, BAR_CSTRORM_INTMEM +
4660 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4661 REG_WR(bp, BAR_CSTRORM_INTMEM +
4662 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4663 U64_HI(section));
ca00392c
EG
4664 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4665 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4666
4667 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
ca00392c
EG
4668 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4669 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4670
4671 /* CSTORM */
4672 section = ((u64)mapping) + offsetof(struct host_status_block,
4673 c_status_block);
34f80b04 4674 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4675
4676 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4677 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4678 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4679 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4680 U64_HI(section));
7a9b2557 4681 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
ca00392c 4682 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4683
4684 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4685 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4686 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
34f80b04
EG
4687
4688 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4689}
4690
4691static void bnx2x_zero_def_sb(struct bnx2x *bp)
4692{
4693 int func = BP_FUNC(bp);
a2fbb9ea 4694
ca00392c 4695 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
490c3c9b
EG
4696 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4697 sizeof(struct tstorm_def_status_block)/4);
ca00392c
EG
4698 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4699 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4700 sizeof(struct cstorm_def_status_block_u)/4);
4701 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4702 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4703 sizeof(struct cstorm_def_status_block_c)/4);
4704 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
34f80b04
EG
4705 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4706 sizeof(struct xstorm_def_status_block)/4);
a2fbb9ea
ET
4707}
4708
4709static void bnx2x_init_def_sb(struct bnx2x *bp,
4710 struct host_def_status_block *def_sb,
34f80b04 4711 dma_addr_t mapping, int sb_id)
a2fbb9ea 4712{
34f80b04
EG
4713 int port = BP_PORT(bp);
4714 int func = BP_FUNC(bp);
a2fbb9ea
ET
4715 int index, val, reg_offset;
4716 u64 section;
4717
4718 /* ATTN */
4719 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4720 atten_status_block);
34f80b04 4721 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4722
49d66772
ET
4723 bp->attn_state = 0;
4724
a2fbb9ea
ET
4725 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4726 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4727
34f80b04 4728 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4729 bp->attn_group[index].sig[0] = REG_RD(bp,
4730 reg_offset + 0x10*index);
4731 bp->attn_group[index].sig[1] = REG_RD(bp,
4732 reg_offset + 0x4 + 0x10*index);
4733 bp->attn_group[index].sig[2] = REG_RD(bp,
4734 reg_offset + 0x8 + 0x10*index);
4735 bp->attn_group[index].sig[3] = REG_RD(bp,
4736 reg_offset + 0xc + 0x10*index);
4737 }
4738
a2fbb9ea
ET
4739 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4740 HC_REG_ATTN_MSG0_ADDR_L);
4741
4742 REG_WR(bp, reg_offset, U64_LO(section));
4743 REG_WR(bp, reg_offset + 4, U64_HI(section));
4744
4745 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4746
4747 val = REG_RD(bp, reg_offset);
34f80b04 4748 val |= sb_id;
a2fbb9ea
ET
4749 REG_WR(bp, reg_offset, val);
4750
4751 /* USTORM */
4752 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4753 u_def_status_block);
34f80b04 4754 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 4755
ca00392c
EG
4756 REG_WR(bp, BAR_CSTRORM_INTMEM +
4757 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4758 REG_WR(bp, BAR_CSTRORM_INTMEM +
4759 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
a2fbb9ea 4760 U64_HI(section));
ca00392c
EG
4761 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4762 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
a2fbb9ea
ET
4763
4764 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
ca00392c
EG
4765 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4766 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
a2fbb9ea
ET
4767
4768 /* CSTORM */
4769 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4770 c_def_status_block);
34f80b04 4771 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4772
4773 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4774 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
a2fbb9ea 4775 REG_WR(bp, BAR_CSTRORM_INTMEM +
ca00392c 4776 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
a2fbb9ea 4777 U64_HI(section));
5c862848 4778 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
ca00392c 4779 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
a2fbb9ea
ET
4780
4781 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4782 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c 4783 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
a2fbb9ea
ET
4784
4785 /* TSTORM */
4786 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4787 t_def_status_block);
34f80b04 4788 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4789
4790 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4791 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4792 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4793 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4794 U64_HI(section));
5c862848 4795 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4796 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4797
4798 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4799 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4800 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4801
4802 /* XSTORM */
4803 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4804 x_def_status_block);
34f80b04 4805 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4806
4807 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4808 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4809 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4810 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4811 U64_HI(section));
5c862848 4812 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4813 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4814
4815 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4816 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4817 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4818
bb2a0f7a 4819 bp->stats_pending = 0;
66e855f3 4820 bp->set_mac_pending = 0;
bb2a0f7a 4821
34f80b04 4822 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4823}
4824
4825static void bnx2x_update_coalesce(struct bnx2x *bp)
4826{
34f80b04 4827 int port = BP_PORT(bp);
a2fbb9ea
ET
4828 int i;
4829
4830 for_each_queue(bp, i) {
34f80b04 4831 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4832
4833 /* HC_INDEX_U_ETH_RX_CQ_CONS */
ca00392c
EG
4834 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4835 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4836 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4837 bp->rx_ticks/12);
ca00392c
EG
4838 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4839 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4840 U_SB_ETH_RX_CQ_INDEX),
3799cf47 4841 (bp->rx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4842
4843 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4844 REG_WR8(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4845 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4846 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4847 bp->tx_ticks/12);
a2fbb9ea 4848 REG_WR16(bp, BAR_CSTRORM_INTMEM +
ca00392c
EG
4849 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4850 C_SB_ETH_TX_CQ_INDEX),
3799cf47 4851 (bp->tx_ticks/12) ? 0 : 1);
a2fbb9ea
ET
4852 }
4853}
4854
7a9b2557
VZ
4855static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4856 struct bnx2x_fastpath *fp, int last)
4857{
4858 int i;
4859
4860 for (i = 0; i < last; i++) {
4861 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4862 struct sk_buff *skb = rx_buf->skb;
4863
4864 if (skb == NULL) {
4865 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4866 continue;
4867 }
4868
4869 if (fp->tpa_state[i] == BNX2X_TPA_START)
4870 pci_unmap_single(bp->pdev,
4871 pci_unmap_addr(rx_buf, mapping),
356e2385 4872 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
4873
4874 dev_kfree_skb(skb);
4875 rx_buf->skb = NULL;
4876 }
4877}
4878
a2fbb9ea
ET
4879static void bnx2x_init_rx_rings(struct bnx2x *bp)
4880{
7a9b2557 4881 int func = BP_FUNC(bp);
32626230
EG
4882 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4883 ETH_MAX_AGGREGATION_QUEUES_E1H;
4884 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4885 int i, j;
a2fbb9ea 4886
87942b46 4887 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4888 DP(NETIF_MSG_IFUP,
4889 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4890
7a9b2557 4891 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4892
555f6c78 4893 for_each_rx_queue(bp, j) {
32626230 4894 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4895
32626230 4896 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4897 fp->tpa_pool[i].skb =
4898 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4899 if (!fp->tpa_pool[i].skb) {
4900 BNX2X_ERR("Failed to allocate TPA "
4901 "skb pool for queue[%d] - "
4902 "disabling TPA on this "
4903 "queue!\n", j);
4904 bnx2x_free_tpa_pool(bp, fp, i);
4905 fp->disable_tpa = 1;
4906 break;
4907 }
4908 pci_unmap_addr_set((struct sw_rx_bd *)
4909 &bp->fp->tpa_pool[i],
4910 mapping, 0);
4911 fp->tpa_state[i] = BNX2X_TPA_STOP;
4912 }
4913 }
4914 }
4915
555f6c78 4916 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4917 struct bnx2x_fastpath *fp = &bp->fp[j];
4918
4919 fp->rx_bd_cons = 0;
4920 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4921 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4922
ca00392c
EG
4923 /* Mark queue as Rx */
4924 fp->is_rx_queue = 1;
4925
7a9b2557
VZ
4926 /* "next page" elements initialization */
4927 /* SGE ring */
4928 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4929 struct eth_rx_sge *sge;
4930
4931 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4932 sge->addr_hi =
4933 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4934 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4935 sge->addr_lo =
4936 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4937 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4938 }
4939
4940 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4941
7a9b2557 4942 /* RX BD ring */
a2fbb9ea
ET
4943 for (i = 1; i <= NUM_RX_RINGS; i++) {
4944 struct eth_rx_bd *rx_bd;
4945
4946 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4947 rx_bd->addr_hi =
4948 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4949 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4950 rx_bd->addr_lo =
4951 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4952 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4953 }
4954
34f80b04 4955 /* CQ ring */
a2fbb9ea
ET
4956 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4957 struct eth_rx_cqe_next_page *nextpg;
4958
4959 nextpg = (struct eth_rx_cqe_next_page *)
4960 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4961 nextpg->addr_hi =
4962 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4963 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4964 nextpg->addr_lo =
4965 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4966 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4967 }
4968
7a9b2557
VZ
4969 /* Allocate SGEs and initialize the ring elements */
4970 for (i = 0, ring_prod = 0;
4971 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4972
7a9b2557
VZ
4973 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4974 BNX2X_ERR("was only able to allocate "
4975 "%d rx sges\n", i);
4976 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4977 /* Cleanup already allocated elements */
4978 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4979 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4980 fp->disable_tpa = 1;
4981 ring_prod = 0;
4982 break;
4983 }
4984 ring_prod = NEXT_SGE_IDX(ring_prod);
4985 }
4986 fp->rx_sge_prod = ring_prod;
4987
4988 /* Allocate BDs and initialize BD ring */
66e855f3 4989 fp->rx_comp_cons = 0;
7a9b2557 4990 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4991 for (i = 0; i < bp->rx_ring_size; i++) {
4992 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4993 BNX2X_ERR("was only able to allocate "
de832a55
EG
4994 "%d rx skbs on queue[%d]\n", i, j);
4995 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4996 break;
4997 }
4998 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4999 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5000 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5001 }
5002
7a9b2557
VZ
5003 fp->rx_bd_prod = ring_prod;
5004 /* must not have more available CQEs than BDs */
5005 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5006 cqe_ring_prod);
a2fbb9ea
ET
5007 fp->rx_pkt = fp->rx_calls = 0;
5008
7a9b2557
VZ
5009 /* Warning!
5010 * this will generate an interrupt (to the TSTORM)
5011 * must only be done after chip is initialized
5012 */
5013 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5014 fp->rx_sge_prod);
a2fbb9ea
ET
5015 if (j != 0)
5016 continue;
5017
5018 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5019 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5020 U64_LO(fp->rx_comp_mapping));
5021 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5022 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5023 U64_HI(fp->rx_comp_mapping));
5024 }
5025}
5026
5027static void bnx2x_init_tx_ring(struct bnx2x *bp)
5028{
5029 int i, j;
5030
555f6c78 5031 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
5032 struct bnx2x_fastpath *fp = &bp->fp[j];
5033
5034 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5035 struct eth_tx_next_bd *tx_next_bd =
5036 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5037
ca00392c 5038 tx_next_bd->addr_hi =
a2fbb9ea 5039 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5040 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5041 tx_next_bd->addr_lo =
a2fbb9ea 5042 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5043 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5044 }
5045
ca00392c
EG
5046 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5047 fp->tx_db.data.zero_fill1 = 0;
5048 fp->tx_db.data.prod = 0;
5049
a2fbb9ea
ET
5050 fp->tx_pkt_prod = 0;
5051 fp->tx_pkt_cons = 0;
5052 fp->tx_bd_prod = 0;
5053 fp->tx_bd_cons = 0;
5054 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5055 fp->tx_pkt = 0;
5056 }
6fe49bb9
EG
5057
5058 /* clean tx statistics */
5059 for_each_rx_queue(bp, i)
5060 bnx2x_fp(bp, i, tx_pkt) = 0;
a2fbb9ea
ET
5061}
5062
5063static void bnx2x_init_sp_ring(struct bnx2x *bp)
5064{
34f80b04 5065 int func = BP_FUNC(bp);
a2fbb9ea
ET
5066
5067 spin_lock_init(&bp->spq_lock);
5068
5069 bp->spq_left = MAX_SPQ_PENDING;
5070 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5071 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5072 bp->spq_prod_bd = bp->spq;
5073 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5074
34f80b04 5075 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5076 U64_LO(bp->spq_mapping));
34f80b04
EG
5077 REG_WR(bp,
5078 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5079 U64_HI(bp->spq_mapping));
5080
34f80b04 5081 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5082 bp->spq_prod_idx);
5083}
5084
5085static void bnx2x_init_context(struct bnx2x *bp)
5086{
5087 int i;
5088
ca00392c 5089 for_each_rx_queue(bp, i) {
a2fbb9ea
ET
5090 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5091 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5092 u8 cl_id = fp->cl_id;
a2fbb9ea 5093
34f80b04
EG
5094 context->ustorm_st_context.common.sb_index_numbers =
5095 BNX2X_RX_SB_INDEX_NUM;
0626b899 5096 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5097 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5098 context->ustorm_st_context.common.flags =
de832a55
EG
5099 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5100 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5101 context->ustorm_st_context.common.statistics_counter_id =
5102 cl_id;
8d9c5f34 5103 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5104 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5105 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5106 bp->rx_buf_size;
34f80b04 5107 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5108 U64_HI(fp->rx_desc_mapping);
34f80b04 5109 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5110 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5111 if (!fp->disable_tpa) {
5112 context->ustorm_st_context.common.flags |=
ca00392c 5113 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5114 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5115 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5116 (u32)0xffff);
7a9b2557
VZ
5117 context->ustorm_st_context.common.sge_page_base_hi =
5118 U64_HI(fp->rx_sge_mapping);
5119 context->ustorm_st_context.common.sge_page_base_lo =
5120 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5121
5122 context->ustorm_st_context.common.max_sges_for_packet =
5123 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5124 context->ustorm_st_context.common.max_sges_for_packet =
5125 ((context->ustorm_st_context.common.
5126 max_sges_for_packet + PAGES_PER_SGE - 1) &
5127 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5128 }
5129
8d9c5f34
EG
5130 context->ustorm_ag_context.cdu_usage =
5131 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5132 CDU_REGION_NUMBER_UCM_AG,
5133 ETH_CONNECTION_TYPE);
5134
ca00392c
EG
5135 context->xstorm_ag_context.cdu_reserved =
5136 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5137 CDU_REGION_NUMBER_XCM_AG,
5138 ETH_CONNECTION_TYPE);
5139 }
5140
5141 for_each_tx_queue(bp, i) {
5142 struct bnx2x_fastpath *fp = &bp->fp[i];
5143 struct eth_context *context =
5144 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5145
5146 context->cstorm_st_context.sb_index_number =
5147 C_SB_ETH_TX_CQ_INDEX;
5148 context->cstorm_st_context.status_block_id = fp->sb_id;
5149
8d9c5f34
EG
5150 context->xstorm_st_context.tx_bd_page_base_hi =
5151 U64_HI(fp->tx_desc_mapping);
5152 context->xstorm_st_context.tx_bd_page_base_lo =
5153 U64_LO(fp->tx_desc_mapping);
ca00392c 5154 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5155 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5156 }
5157}
5158
5159static void bnx2x_init_ind_table(struct bnx2x *bp)
5160{
26c8fa4d 5161 int func = BP_FUNC(bp);
a2fbb9ea
ET
5162 int i;
5163
555f6c78 5164 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5165 return;
5166
555f6c78
EG
5167 DP(NETIF_MSG_IFUP,
5168 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5169 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5170 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5171 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 5172 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
5173}
5174
49d66772
ET
5175static void bnx2x_set_client_config(struct bnx2x *bp)
5176{
49d66772 5177 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5178 int port = BP_PORT(bp);
5179 int i;
49d66772 5180
e7799c5f 5181 tstorm_client.mtu = bp->dev->mtu;
49d66772 5182 tstorm_client.config_flags =
de832a55
EG
5183 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5184 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5185#ifdef BCM_VLAN
0c6671b0 5186 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5187 tstorm_client.config_flags |=
8d9c5f34 5188 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5189 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5190 }
5191#endif
49d66772
ET
5192
5193 for_each_queue(bp, i) {
de832a55
EG
5194 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5195
49d66772 5196 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5197 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5198 ((u32 *)&tstorm_client)[0]);
5199 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5200 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5201 ((u32 *)&tstorm_client)[1]);
5202 }
5203
34f80b04
EG
5204 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5205 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5206}
5207
a2fbb9ea
ET
5208static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5209{
a2fbb9ea 5210 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
5211 int mode = bp->rx_mode;
5212 int mask = (1 << BP_L_ID(bp));
5213 int func = BP_FUNC(bp);
581ce43d 5214 int port = BP_PORT(bp);
a2fbb9ea 5215 int i;
581ce43d
EG
5216 /* All but management unicast packets should pass to the host as well */
5217 u32 llh_mask =
5218 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5219 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5220 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5221 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5222
3196a88a 5223 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5224
5225 switch (mode) {
5226 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5227 tstorm_mac_filter.ucast_drop_all = mask;
5228 tstorm_mac_filter.mcast_drop_all = mask;
5229 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5230 break;
356e2385 5231
a2fbb9ea 5232 case BNX2X_RX_MODE_NORMAL:
34f80b04 5233 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5234 break;
356e2385 5235
a2fbb9ea 5236 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5237 tstorm_mac_filter.mcast_accept_all = mask;
5238 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5239 break;
356e2385 5240
a2fbb9ea 5241 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5242 tstorm_mac_filter.ucast_accept_all = mask;
5243 tstorm_mac_filter.mcast_accept_all = mask;
5244 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5245 /* pass management unicast packets as well */
5246 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5247 break;
356e2385 5248
a2fbb9ea 5249 default:
34f80b04
EG
5250 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5251 break;
a2fbb9ea
ET
5252 }
5253
581ce43d
EG
5254 REG_WR(bp,
5255 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5256 llh_mask);
5257
a2fbb9ea
ET
5258 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5259 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5260 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5261 ((u32 *)&tstorm_mac_filter)[i]);
5262
34f80b04 5263/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5264 ((u32 *)&tstorm_mac_filter)[i]); */
5265 }
a2fbb9ea 5266
49d66772
ET
5267 if (mode != BNX2X_RX_MODE_NONE)
5268 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5269}
5270
471de716
EG
5271static void bnx2x_init_internal_common(struct bnx2x *bp)
5272{
5273 int i;
5274
5275 /* Zero this manually as its initialization is
5276 currently missing in the initTool */
5277 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5278 REG_WR(bp, BAR_USTRORM_INTMEM +
5279 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5280}
5281
5282static void bnx2x_init_internal_port(struct bnx2x *bp)
5283{
5284 int port = BP_PORT(bp);
5285
ca00392c
EG
5286 REG_WR(bp,
5287 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5288 REG_WR(bp,
5289 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5290 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5291 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5292}
5293
5294static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5295{
a2fbb9ea
ET
5296 struct tstorm_eth_function_common_config tstorm_config = {0};
5297 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5298 int port = BP_PORT(bp);
5299 int func = BP_FUNC(bp);
de832a55
EG
5300 int i, j;
5301 u32 offset;
471de716 5302 u16 max_agg_size;
a2fbb9ea
ET
5303
5304 if (is_multi(bp)) {
555f6c78 5305 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5306 tstorm_config.rss_result_mask = MULTI_MASK;
5307 }
ca00392c
EG
5308
5309 /* Enable TPA if needed */
5310 if (bp->flags & TPA_ENABLE_FLAG)
5311 tstorm_config.config_flags |=
5312 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5313
8d9c5f34
EG
5314 if (IS_E1HMF(bp))
5315 tstorm_config.config_flags |=
5316 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5317
34f80b04
EG
5318 tstorm_config.leading_client_id = BP_L_ID(bp);
5319
a2fbb9ea 5320 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5321 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5322 (*(u32 *)&tstorm_config));
5323
c14423fe 5324 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
5325 bnx2x_set_storm_rx_mode(bp);
5326
de832a55
EG
5327 for_each_queue(bp, i) {
5328 u8 cl_id = bp->fp[i].cl_id;
5329
5330 /* reset xstorm per client statistics */
5331 offset = BAR_XSTRORM_INTMEM +
5332 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5333 for (j = 0;
5334 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5335 REG_WR(bp, offset + j*4, 0);
5336
5337 /* reset tstorm per client statistics */
5338 offset = BAR_TSTRORM_INTMEM +
5339 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5340 for (j = 0;
5341 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5342 REG_WR(bp, offset + j*4, 0);
5343
5344 /* reset ustorm per client statistics */
5345 offset = BAR_USTRORM_INTMEM +
5346 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5347 for (j = 0;
5348 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5349 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5350 }
5351
5352 /* Init statistics related context */
34f80b04 5353 stats_flags.collect_eth = 1;
a2fbb9ea 5354
66e855f3 5355 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5356 ((u32 *)&stats_flags)[0]);
66e855f3 5357 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5358 ((u32 *)&stats_flags)[1]);
5359
66e855f3 5360 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5361 ((u32 *)&stats_flags)[0]);
66e855f3 5362 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5363 ((u32 *)&stats_flags)[1]);
5364
de832a55
EG
5365 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5366 ((u32 *)&stats_flags)[0]);
5367 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5368 ((u32 *)&stats_flags)[1]);
5369
66e855f3 5370 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5371 ((u32 *)&stats_flags)[0]);
66e855f3 5372 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5373 ((u32 *)&stats_flags)[1]);
5374
66e855f3
YG
5375 REG_WR(bp, BAR_XSTRORM_INTMEM +
5376 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5377 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5378 REG_WR(bp, BAR_XSTRORM_INTMEM +
5379 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5380 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5381
5382 REG_WR(bp, BAR_TSTRORM_INTMEM +
5383 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5384 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5385 REG_WR(bp, BAR_TSTRORM_INTMEM +
5386 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5387 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5388
de832a55
EG
5389 REG_WR(bp, BAR_USTRORM_INTMEM +
5390 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5391 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5392 REG_WR(bp, BAR_USTRORM_INTMEM +
5393 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5394 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5395
34f80b04
EG
5396 if (CHIP_IS_E1H(bp)) {
5397 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5398 IS_E1HMF(bp));
5399 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5400 IS_E1HMF(bp));
5401 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5402 IS_E1HMF(bp));
5403 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5404 IS_E1HMF(bp));
5405
7a9b2557
VZ
5406 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5407 bp->e1hov);
34f80b04
EG
5408 }
5409
4f40f2cb
EG
5410 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5411 max_agg_size =
5412 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5413 SGE_PAGE_SIZE * PAGES_PER_SGE),
5414 (u32)0xffff);
555f6c78 5415 for_each_rx_queue(bp, i) {
7a9b2557 5416 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5417
5418 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5419 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5420 U64_LO(fp->rx_comp_mapping));
5421 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5422 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5423 U64_HI(fp->rx_comp_mapping));
5424
ca00392c
EG
5425 /* Next page */
5426 REG_WR(bp, BAR_USTRORM_INTMEM +
5427 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5428 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5429 REG_WR(bp, BAR_USTRORM_INTMEM +
5430 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5431 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5432
7a9b2557 5433 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5434 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5435 max_agg_size);
5436 }
8a1c38d1 5437
1c06328c
EG
5438 /* dropless flow control */
5439 if (CHIP_IS_E1H(bp)) {
5440 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5441
5442 rx_pause.bd_thr_low = 250;
5443 rx_pause.cqe_thr_low = 250;
5444 rx_pause.cos = 1;
5445 rx_pause.sge_thr_low = 0;
5446 rx_pause.bd_thr_high = 350;
5447 rx_pause.cqe_thr_high = 350;
5448 rx_pause.sge_thr_high = 0;
5449
5450 for_each_rx_queue(bp, i) {
5451 struct bnx2x_fastpath *fp = &bp->fp[i];
5452
5453 if (!fp->disable_tpa) {
5454 rx_pause.sge_thr_low = 150;
5455 rx_pause.sge_thr_high = 250;
5456 }
5457
5458
5459 offset = BAR_USTRORM_INTMEM +
5460 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5461 fp->cl_id);
5462 for (j = 0;
5463 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5464 j++)
5465 REG_WR(bp, offset + j*4,
5466 ((u32 *)&rx_pause)[j]);
5467 }
5468 }
5469
8a1c38d1
EG
5470 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5471
5472 /* Init rate shaping and fairness contexts */
5473 if (IS_E1HMF(bp)) {
5474 int vn;
5475
5476 /* During init there is no active link
5477 Until link is up, set link rate to 10Gbps */
5478 bp->link_vars.line_speed = SPEED_10000;
5479 bnx2x_init_port_minmax(bp);
5480
5481 bnx2x_calc_vn_weight_sum(bp);
5482
5483 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5484 bnx2x_init_vn_minmax(bp, 2*vn + port);
5485
5486 /* Enable rate shaping and fairness */
5487 bp->cmng.flags.cmng_enables =
5488 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5489 if (bp->vn_weight_sum)
5490 bp->cmng.flags.cmng_enables |=
5491 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5492 else
5493 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5494 " fairness will be disabled\n");
5495 } else {
5496 /* rate shaping and fairness are disabled */
5497 DP(NETIF_MSG_IFUP,
5498 "single function mode minmax will be disabled\n");
5499 }
5500
5501
5502 /* Store it to internal memory */
5503 if (bp->port.pmf)
5504 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5505 REG_WR(bp, BAR_XSTRORM_INTMEM +
5506 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5507 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5508}
5509
471de716
EG
5510static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5511{
5512 switch (load_code) {
5513 case FW_MSG_CODE_DRV_LOAD_COMMON:
5514 bnx2x_init_internal_common(bp);
5515 /* no break */
5516
5517 case FW_MSG_CODE_DRV_LOAD_PORT:
5518 bnx2x_init_internal_port(bp);
5519 /* no break */
5520
5521 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5522 bnx2x_init_internal_func(bp);
5523 break;
5524
5525 default:
5526 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5527 break;
5528 }
5529}
5530
5531static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5532{
5533 int i;
5534
5535 for_each_queue(bp, i) {
5536 struct bnx2x_fastpath *fp = &bp->fp[i];
5537
34f80b04 5538 fp->bp = bp;
a2fbb9ea 5539 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5540 fp->index = i;
34f80b04
EG
5541 fp->cl_id = BP_L_ID(bp) + i;
5542 fp->sb_id = fp->cl_id;
ca00392c
EG
5543 /* Suitable Rx and Tx SBs are served by the same client */
5544 if (i >= bp->num_rx_queues)
5545 fp->cl_id -= bp->num_rx_queues;
34f80b04 5546 DP(NETIF_MSG_IFUP,
f5372251
EG
5547 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5548 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5549 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5550 fp->sb_id);
5c862848 5551 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5552 }
5553
16119785
EG
5554 /* ensure status block indices were read */
5555 rmb();
5556
5557
5c862848
EG
5558 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5559 DEF_SB_ID);
5560 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5561 bnx2x_update_coalesce(bp);
5562 bnx2x_init_rx_rings(bp);
5563 bnx2x_init_tx_ring(bp);
5564 bnx2x_init_sp_ring(bp);
5565 bnx2x_init_context(bp);
471de716 5566 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5567 bnx2x_init_ind_table(bp);
0ef00459
EG
5568 bnx2x_stats_init(bp);
5569
5570 /* At this point, we are ready for interrupts */
5571 atomic_set(&bp->intr_sem, 0);
5572
5573 /* flush all before enabling interrupts */
5574 mb();
5575 mmiowb();
5576
615f8fd9 5577 bnx2x_int_enable(bp);
eb8da205
EG
5578
5579 /* Check for SPIO5 */
5580 bnx2x_attn_int_deasserted0(bp,
5581 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5582 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5583}
5584
5585/* end of nic init */
5586
5587/*
5588 * gzip service functions
5589 */
5590
5591static int bnx2x_gunzip_init(struct bnx2x *bp)
5592{
5593 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5594 &bp->gunzip_mapping);
5595 if (bp->gunzip_buf == NULL)
5596 goto gunzip_nomem1;
5597
5598 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5599 if (bp->strm == NULL)
5600 goto gunzip_nomem2;
5601
5602 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5603 GFP_KERNEL);
5604 if (bp->strm->workspace == NULL)
5605 goto gunzip_nomem3;
5606
5607 return 0;
5608
5609gunzip_nomem3:
5610 kfree(bp->strm);
5611 bp->strm = NULL;
5612
5613gunzip_nomem2:
5614 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5615 bp->gunzip_mapping);
5616 bp->gunzip_buf = NULL;
5617
5618gunzip_nomem1:
5619 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5620 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5621 return -ENOMEM;
5622}
5623
5624static void bnx2x_gunzip_end(struct bnx2x *bp)
5625{
5626 kfree(bp->strm->workspace);
5627
5628 kfree(bp->strm);
5629 bp->strm = NULL;
5630
5631 if (bp->gunzip_buf) {
5632 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5633 bp->gunzip_mapping);
5634 bp->gunzip_buf = NULL;
5635 }
5636}
5637
94a78b79 5638static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5639{
5640 int n, rc;
5641
5642 /* check gzip header */
94a78b79
VZ
5643 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5644 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5645 return -EINVAL;
94a78b79 5646 }
a2fbb9ea
ET
5647
5648 n = 10;
5649
34f80b04 5650#define FNAME 0x8
a2fbb9ea
ET
5651
5652 if (zbuf[3] & FNAME)
5653 while ((zbuf[n++] != 0) && (n < len));
5654
94a78b79 5655 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5656 bp->strm->avail_in = len - n;
5657 bp->strm->next_out = bp->gunzip_buf;
5658 bp->strm->avail_out = FW_BUF_SIZE;
5659
5660 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5661 if (rc != Z_OK)
5662 return rc;
5663
5664 rc = zlib_inflate(bp->strm, Z_FINISH);
5665 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5666 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5667 bp->dev->name, bp->strm->msg);
5668
5669 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5670 if (bp->gunzip_outlen & 0x3)
5671 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5672 " gunzip_outlen (%d) not aligned\n",
5673 bp->dev->name, bp->gunzip_outlen);
5674 bp->gunzip_outlen >>= 2;
5675
5676 zlib_inflateEnd(bp->strm);
5677
5678 if (rc == Z_STREAM_END)
5679 return 0;
5680
5681 return rc;
5682}
5683
5684/* nic load/unload */
5685
5686/*
34f80b04 5687 * General service functions
a2fbb9ea
ET
5688 */
5689
5690/* send a NIG loopback debug packet */
5691static void bnx2x_lb_pckt(struct bnx2x *bp)
5692{
a2fbb9ea 5693 u32 wb_write[3];
a2fbb9ea
ET
5694
5695 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5696 wb_write[0] = 0x55555555;
5697 wb_write[1] = 0x55555555;
34f80b04 5698 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5699 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5700
5701 /* NON-IP protocol */
a2fbb9ea
ET
5702 wb_write[0] = 0x09000000;
5703 wb_write[1] = 0x55555555;
34f80b04 5704 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5705 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5706}
5707
5708/* some of the internal memories
5709 * are not directly readable from the driver
5710 * to test them we send debug packets
5711 */
5712static int bnx2x_int_mem_test(struct bnx2x *bp)
5713{
5714 int factor;
5715 int count, i;
5716 u32 val = 0;
5717
ad8d3948 5718 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5719 factor = 120;
ad8d3948
EG
5720 else if (CHIP_REV_IS_EMUL(bp))
5721 factor = 200;
5722 else
a2fbb9ea 5723 factor = 1;
a2fbb9ea
ET
5724
5725 DP(NETIF_MSG_HW, "start part1\n");
5726
5727 /* Disable inputs of parser neighbor blocks */
5728 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5729 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5730 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5731 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5732
5733 /* Write 0 to parser credits for CFC search request */
5734 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5735
5736 /* send Ethernet packet */
5737 bnx2x_lb_pckt(bp);
5738
5739 /* TODO do i reset NIG statistic? */
5740 /* Wait until NIG register shows 1 packet of size 0x10 */
5741 count = 1000 * factor;
5742 while (count) {
34f80b04 5743
a2fbb9ea
ET
5744 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5745 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5746 if (val == 0x10)
5747 break;
5748
5749 msleep(10);
5750 count--;
5751 }
5752 if (val != 0x10) {
5753 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5754 return -1;
5755 }
5756
5757 /* Wait until PRS register shows 1 packet */
5758 count = 1000 * factor;
5759 while (count) {
5760 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5761 if (val == 1)
5762 break;
5763
5764 msleep(10);
5765 count--;
5766 }
5767 if (val != 0x1) {
5768 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5769 return -2;
5770 }
5771
5772 /* Reset and init BRB, PRS */
34f80b04 5773 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5774 msleep(50);
34f80b04 5775 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5776 msleep(50);
94a78b79
VZ
5777 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5778 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5779
5780 DP(NETIF_MSG_HW, "part2\n");
5781
5782 /* Disable inputs of parser neighbor blocks */
5783 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5784 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5785 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5786 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5787
5788 /* Write 0 to parser credits for CFC search request */
5789 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5790
5791 /* send 10 Ethernet packets */
5792 for (i = 0; i < 10; i++)
5793 bnx2x_lb_pckt(bp);
5794
5795 /* Wait until NIG register shows 10 + 1
5796 packets of size 11*0x10 = 0xb0 */
5797 count = 1000 * factor;
5798 while (count) {
34f80b04 5799
a2fbb9ea
ET
5800 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5801 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5802 if (val == 0xb0)
5803 break;
5804
5805 msleep(10);
5806 count--;
5807 }
5808 if (val != 0xb0) {
5809 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5810 return -3;
5811 }
5812
5813 /* Wait until PRS register shows 2 packets */
5814 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5815 if (val != 2)
5816 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5817
5818 /* Write 1 to parser credits for CFC search request */
5819 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5820
5821 /* Wait until PRS register shows 3 packets */
5822 msleep(10 * factor);
5823 /* Wait until NIG register shows 1 packet of size 0x10 */
5824 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5825 if (val != 3)
5826 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5827
5828 /* clear NIG EOP FIFO */
5829 for (i = 0; i < 11; i++)
5830 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5831 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5832 if (val != 1) {
5833 BNX2X_ERR("clear of NIG failed\n");
5834 return -4;
5835 }
5836
5837 /* Reset and init BRB, PRS, NIG */
5838 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5839 msleep(50);
5840 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5841 msleep(50);
94a78b79
VZ
5842 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5843 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5844#ifndef BCM_ISCSI
5845 /* set NIC mode */
5846 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5847#endif
5848
5849 /* Enable inputs of parser neighbor blocks */
5850 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5851 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5852 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5853 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5854
5855 DP(NETIF_MSG_HW, "done\n");
5856
5857 return 0; /* OK */
5858}
5859
5860static void enable_blocks_attention(struct bnx2x *bp)
5861{
5862 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5863 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5864 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5865 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5866 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5867 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5868 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5869 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5870 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5871/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5872/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5873 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5874 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5875 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5876/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5877/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5878 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5879 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5880 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5881 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5882/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5883/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5884 if (CHIP_REV_IS_FPGA(bp))
5885 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5886 else
5887 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5888 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5889 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5890 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5891/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5892/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5893 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5894 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5895/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5896 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5897}
5898
34f80b04 5899
81f75bbf
EG
5900static void bnx2x_reset_common(struct bnx2x *bp)
5901{
5902 /* reset_common */
5903 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5904 0xd3ffff7f);
5905 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5906}
5907
fd4ef40d
EG
5908
5909static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5910{
5911 u32 val;
5912 u8 port;
5913 u8 is_required = 0;
5914
5915 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5916 SHARED_HW_CFG_FAN_FAILURE_MASK;
5917
5918 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5919 is_required = 1;
5920
5921 /*
5922 * The fan failure mechanism is usually related to the PHY type since
5923 * the power consumption of the board is affected by the PHY. Currently,
5924 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5925 */
5926 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5927 for (port = PORT_0; port < PORT_MAX; port++) {
5928 u32 phy_type =
5929 SHMEM_RD(bp, dev_info.port_hw_config[port].
5930 external_phy_config) &
5931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5932 is_required |=
5933 ((phy_type ==
5934 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
5935 (phy_type ==
5936 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
5937 (phy_type ==
5938 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5939 }
5940
5941 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5942
5943 if (is_required == 0)
5944 return;
5945
5946 /* Fan failure is indicated by SPIO 5 */
5947 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5948 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5949
5950 /* set to active low mode */
5951 val = REG_RD(bp, MISC_REG_SPIO_INT);
5952 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5953 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5954 REG_WR(bp, MISC_REG_SPIO_INT, val);
5955
5956 /* enable interrupt to signal the IGU */
5957 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5958 val |= (1 << MISC_REGISTERS_SPIO_5);
5959 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5960}
5961
34f80b04 5962static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5963{
a2fbb9ea 5964 u32 val, i;
a2fbb9ea 5965
34f80b04 5966 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5967
81f75bbf 5968 bnx2x_reset_common(bp);
34f80b04
EG
5969 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5970 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5971
94a78b79 5972 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5973 if (CHIP_IS_E1H(bp))
5974 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5975
34f80b04
EG
5976 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5977 msleep(30);
5978 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5979
94a78b79 5980 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5981 if (CHIP_IS_E1(bp)) {
5982 /* enable HW interrupt from PXP on USDM overflow
5983 bit 16 on INT_MASK_0 */
5984 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5985 }
a2fbb9ea 5986
94a78b79 5987 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5988 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5989
5990#ifdef __BIG_ENDIAN
34f80b04
EG
5991 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5992 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5993 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5994 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5995 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5996 /* make sure this value is 0 */
5997 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5998
5999/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6000 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6001 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6002 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6003 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6004#endif
6005
34f80b04 6006 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 6007#ifdef BCM_ISCSI
34f80b04
EG
6008 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6009 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6010 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6011#endif
6012
34f80b04
EG
6013 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6014 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6015
34f80b04
EG
6016 /* let the HW do it's magic ... */
6017 msleep(100);
6018 /* finish PXP init */
6019 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6020 if (val != 1) {
6021 BNX2X_ERR("PXP2 CFG failed\n");
6022 return -EBUSY;
6023 }
6024 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6025 if (val != 1) {
6026 BNX2X_ERR("PXP2 RD_INIT failed\n");
6027 return -EBUSY;
6028 }
a2fbb9ea 6029
34f80b04
EG
6030 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6031 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6032
94a78b79 6033 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6034
34f80b04
EG
6035 /* clean the DMAE memory */
6036 bp->dmae_ready = 1;
6037 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6038
94a78b79
VZ
6039 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6040 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6041 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6042 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6043
34f80b04
EG
6044 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6045 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6046 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6047 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6048
94a78b79 6049 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
6050 /* soft reset pulse */
6051 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6052 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
6053
6054#ifdef BCM_ISCSI
94a78b79 6055 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6056#endif
a2fbb9ea 6057
94a78b79 6058 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6059 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6060 if (!CHIP_REV_IS_SLOW(bp)) {
6061 /* enable hw interrupt from doorbell Q */
6062 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6063 }
a2fbb9ea 6064
94a78b79
VZ
6065 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6066 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6067 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
6068 /* set NIC mode */
6069 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
6070 if (CHIP_IS_E1H(bp))
6071 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6072
94a78b79
VZ
6073 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6074 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6075 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6076 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6077
ca00392c
EG
6078 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6079 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6080 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6081 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6082
94a78b79
VZ
6083 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6084 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6085 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6086 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6087
34f80b04
EG
6088 /* sync semi rtc */
6089 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6090 0x80000000);
6091 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6092 0x80000000);
a2fbb9ea 6093
94a78b79
VZ
6094 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6095 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6096 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6097
34f80b04
EG
6098 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6099 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6100 REG_WR(bp, i, 0xc0cac01a);
6101 /* TODO: replace with something meaningful */
6102 }
94a78b79 6103 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 6104 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6105
34f80b04
EG
6106 if (sizeof(union cdu_context) != 1024)
6107 /* we currently assume that a context is 1024 bytes */
6108 printk(KERN_ALERT PFX "please adjust the size of"
6109 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6110
94a78b79 6111 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6112 val = (4 << 24) + (0 << 12) + 1024;
6113 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6114
94a78b79 6115 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6116 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6117 /* enable context validation interrupt from CFC */
6118 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6119
6120 /* set the thresholds to prevent CFC/CDU race */
6121 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6122
94a78b79
VZ
6123 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6124 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6125
94a78b79 6126 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6127 /* Reset PCIE errors for debug */
6128 REG_WR(bp, 0x2814, 0xffffffff);
6129 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6130
94a78b79 6131 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6132 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6133 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6134 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6135
94a78b79 6136 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6137 if (CHIP_IS_E1H(bp)) {
6138 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6139 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6140 }
6141
6142 if (CHIP_REV_IS_SLOW(bp))
6143 msleep(200);
6144
6145 /* finish CFC init */
6146 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6147 if (val != 1) {
6148 BNX2X_ERR("CFC LL_INIT failed\n");
6149 return -EBUSY;
6150 }
6151 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6152 if (val != 1) {
6153 BNX2X_ERR("CFC AC_INIT failed\n");
6154 return -EBUSY;
6155 }
6156 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6157 if (val != 1) {
6158 BNX2X_ERR("CFC CAM_INIT failed\n");
6159 return -EBUSY;
6160 }
6161 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6162
34f80b04
EG
6163 /* read NIG statistic
6164 to see if this is our first up since powerup */
6165 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6166 val = *bnx2x_sp(bp, wb_data[0]);
6167
6168 /* do internal memory self test */
6169 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6170 BNX2X_ERR("internal mem self test failed\n");
6171 return -EBUSY;
6172 }
6173
35b19ba5 6174 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6175 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6176 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6177 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6178 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6179 bp->port.need_hw_lock = 1;
6180 break;
6181
34f80b04
EG
6182 default:
6183 break;
6184 }
f1410647 6185
fd4ef40d
EG
6186 bnx2x_setup_fan_failure_detection(bp);
6187
34f80b04
EG
6188 /* clear PXP2 attentions */
6189 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6190
34f80b04 6191 enable_blocks_attention(bp);
a2fbb9ea 6192
6bbca910
YR
6193 if (!BP_NOMCP(bp)) {
6194 bnx2x_acquire_phy_lock(bp);
6195 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6196 bnx2x_release_phy_lock(bp);
6197 } else
6198 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6199
34f80b04
EG
6200 return 0;
6201}
a2fbb9ea 6202
34f80b04
EG
6203static int bnx2x_init_port(struct bnx2x *bp)
6204{
6205 int port = BP_PORT(bp);
94a78b79 6206 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6207 u32 low, high;
34f80b04 6208 u32 val;
a2fbb9ea 6209
34f80b04
EG
6210 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6211
6212 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6213
94a78b79 6214 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6215 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6216
6217 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6218 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6219 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
a2fbb9ea
ET
6220#ifdef BCM_ISCSI
6221 /* Port0 1
6222 * Port1 385 */
6223 i++;
6224 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6225 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6226 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6227 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6228
6229 /* Port0 2
6230 * Port1 386 */
6231 i++;
6232 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6233 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6234 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6235 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6236
6237 /* Port0 3
6238 * Port1 387 */
6239 i++;
6240 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6241 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6242 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6243 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6244#endif
94a78b79 6245 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6246
a2fbb9ea
ET
6247#ifdef BCM_ISCSI
6248 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6249 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6250
94a78b79 6251 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea 6252#endif
94a78b79 6253 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6254
94a78b79 6255 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6256 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6257 /* no pause for emulation and FPGA */
6258 low = 0;
6259 high = 513;
6260 } else {
6261 if (IS_E1HMF(bp))
6262 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6263 else if (bp->dev->mtu > 4096) {
6264 if (bp->flags & ONE_PORT_FLAG)
6265 low = 160;
6266 else {
6267 val = bp->dev->mtu;
6268 /* (24*1024 + val*4)/256 */
6269 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6270 }
6271 } else
6272 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6273 high = low + 56; /* 14*1024/256 */
6274 }
6275 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6276 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6277
6278
94a78b79 6279 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6280
94a78b79 6281 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6282 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6283 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6284 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6285
94a78b79
VZ
6286 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6287 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6288 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6289 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6290
94a78b79 6291 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6292 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6293
94a78b79 6294 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6295
6296 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6297 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6298
6299 /* update threshold */
34f80b04 6300 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6301 /* update init credit */
34f80b04 6302 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6303
6304 /* probe changes */
34f80b04 6305 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6306 msleep(5);
34f80b04 6307 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
6308
6309#ifdef BCM_ISCSI
6310 /* tell the searcher where the T2 table is */
6311 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6312
6313 wb_write[0] = U64_LO(bp->t2_mapping);
6314 wb_write[1] = U64_HI(bp->t2_mapping);
6315 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6316 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6317 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6318 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6319
6320 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
a2fbb9ea 6321#endif
94a78b79 6322 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6323 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6324
6325 if (CHIP_IS_E1(bp)) {
6326 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6327 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6328 }
94a78b79 6329 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6330
94a78b79 6331 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6332 /* init aeu_mask_attn_func_0/1:
6333 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6334 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6335 * bits 4-7 are used for "per vn group attention" */
6336 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6337 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6338
94a78b79 6339 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6340 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6341 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6342 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6343 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6344
94a78b79 6345 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6346
6347 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6348
6349 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6350 /* 0x2 disable e1hov, 0x1 enable */
6351 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6352 (IS_E1HMF(bp) ? 0x1 : 0x2));
6353
1c06328c
EG
6354 /* support pause requests from USDM, TSDM and BRB */
6355 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6356
6357 {
6358 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6359 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6360 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6361 }
34f80b04
EG
6362 }
6363
94a78b79 6364 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6365 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6366
35b19ba5 6367 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6368 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6369 {
6370 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6371
6372 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6373 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6374
6375 /* The GPIO should be swapped if the swap register is
6376 set and active */
6377 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6378 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6379
6380 /* Select function upon port-swap configuration */
6381 if (port == 0) {
6382 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6383 aeu_gpio_mask = (swap_val && swap_override) ?
6384 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6385 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6386 } else {
6387 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6388 aeu_gpio_mask = (swap_val && swap_override) ?
6389 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6390 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6391 }
6392 val = REG_RD(bp, offset);
6393 /* add GPIO3 to group */
6394 val |= aeu_gpio_mask;
6395 REG_WR(bp, offset, val);
6396 }
6397 break;
6398
35b19ba5 6399 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6400 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6401 /* add SPIO 5 to group 0 */
4d295db0
EG
6402 {
6403 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6404 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6405 val = REG_RD(bp, reg_addr);
f1410647 6406 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6407 REG_WR(bp, reg_addr, val);
6408 }
f1410647
ET
6409 break;
6410
6411 default:
6412 break;
6413 }
6414
c18487ee 6415 bnx2x__link_reset(bp);
a2fbb9ea 6416
34f80b04
EG
6417 return 0;
6418}
6419
6420#define ILT_PER_FUNC (768/2)
6421#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6422/* the phys address is shifted right 12 bits and has an added
6423 1=valid bit added to the 53rd bit
6424 then since this is a wide register(TM)
6425 we split it into two 32 bit writes
6426 */
6427#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6428#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6429#define PXP_ONE_ILT(x) (((x) << 10) | x)
6430#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6431
6432#define CNIC_ILT_LINES 0
6433
6434static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6435{
6436 int reg;
6437
6438 if (CHIP_IS_E1H(bp))
6439 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6440 else /* E1 */
6441 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6442
6443 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6444}
6445
6446static int bnx2x_init_func(struct bnx2x *bp)
6447{
6448 int port = BP_PORT(bp);
6449 int func = BP_FUNC(bp);
8badd27a 6450 u32 addr, val;
34f80b04
EG
6451 int i;
6452
6453 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6454
8badd27a
EG
6455 /* set MSI reconfigure capability */
6456 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6457 val = REG_RD(bp, addr);
6458 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6459 REG_WR(bp, addr, val);
6460
34f80b04
EG
6461 i = FUNC_ILT_BASE(func);
6462
6463 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6464 if (CHIP_IS_E1H(bp)) {
6465 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6466 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6467 } else /* E1 */
6468 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6469 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6470
6471
6472 if (CHIP_IS_E1H(bp)) {
6473 for (i = 0; i < 9; i++)
6474 bnx2x_init_block(bp,
94a78b79 6475 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6476
6477 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6478 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6479 }
6480
6481 /* HC init per function */
6482 if (CHIP_IS_E1H(bp)) {
6483 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6484
6485 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6486 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6487 }
94a78b79 6488 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6489
c14423fe 6490 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6491 REG_WR(bp, 0x2114, 0xffffffff);
6492 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6493
34f80b04
EG
6494 return 0;
6495}
6496
6497static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6498{
6499 int i, rc = 0;
a2fbb9ea 6500
34f80b04
EG
6501 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6502 BP_FUNC(bp), load_code);
a2fbb9ea 6503
34f80b04
EG
6504 bp->dmae_ready = 0;
6505 mutex_init(&bp->dmae_mutex);
6506 bnx2x_gunzip_init(bp);
a2fbb9ea 6507
34f80b04
EG
6508 switch (load_code) {
6509 case FW_MSG_CODE_DRV_LOAD_COMMON:
6510 rc = bnx2x_init_common(bp);
6511 if (rc)
6512 goto init_hw_err;
6513 /* no break */
6514
6515 case FW_MSG_CODE_DRV_LOAD_PORT:
6516 bp->dmae_ready = 1;
6517 rc = bnx2x_init_port(bp);
6518 if (rc)
6519 goto init_hw_err;
6520 /* no break */
6521
6522 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6523 bp->dmae_ready = 1;
6524 rc = bnx2x_init_func(bp);
6525 if (rc)
6526 goto init_hw_err;
6527 break;
6528
6529 default:
6530 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6531 break;
6532 }
6533
6534 if (!BP_NOMCP(bp)) {
6535 int func = BP_FUNC(bp);
a2fbb9ea
ET
6536
6537 bp->fw_drv_pulse_wr_seq =
34f80b04 6538 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6539 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6540 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6541 }
a2fbb9ea 6542
34f80b04
EG
6543 /* this needs to be done before gunzip end */
6544 bnx2x_zero_def_sb(bp);
6545 for_each_queue(bp, i)
6546 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6547
6548init_hw_err:
6549 bnx2x_gunzip_end(bp);
6550
6551 return rc;
a2fbb9ea
ET
6552}
6553
a2fbb9ea
ET
6554static void bnx2x_free_mem(struct bnx2x *bp)
6555{
6556
6557#define BNX2X_PCI_FREE(x, y, size) \
6558 do { \
6559 if (x) { \
6560 pci_free_consistent(bp->pdev, size, x, y); \
6561 x = NULL; \
6562 y = 0; \
6563 } \
6564 } while (0)
6565
6566#define BNX2X_FREE(x) \
6567 do { \
6568 if (x) { \
6569 vfree(x); \
6570 x = NULL; \
6571 } \
6572 } while (0)
6573
6574 int i;
6575
6576 /* fastpath */
555f6c78 6577 /* Common */
a2fbb9ea
ET
6578 for_each_queue(bp, i) {
6579
555f6c78 6580 /* status blocks */
a2fbb9ea
ET
6581 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6582 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6583 sizeof(struct host_status_block));
555f6c78
EG
6584 }
6585 /* Rx */
6586 for_each_rx_queue(bp, i) {
a2fbb9ea 6587
555f6c78 6588 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6589 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6590 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6591 bnx2x_fp(bp, i, rx_desc_mapping),
6592 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6593
6594 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6595 bnx2x_fp(bp, i, rx_comp_mapping),
6596 sizeof(struct eth_fast_path_rx_cqe) *
6597 NUM_RCQ_BD);
a2fbb9ea 6598
7a9b2557 6599 /* SGE ring */
32626230 6600 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6601 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6602 bnx2x_fp(bp, i, rx_sge_mapping),
6603 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6604 }
555f6c78
EG
6605 /* Tx */
6606 for_each_tx_queue(bp, i) {
6607
6608 /* fastpath tx rings: tx_buf tx_desc */
6609 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6610 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6611 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6612 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6613 }
a2fbb9ea
ET
6614 /* end of fastpath */
6615
6616 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6617 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6618
6619 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6620 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6621
6622#ifdef BCM_ISCSI
6623 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6624 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6625 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6626 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6627#endif
7a9b2557 6628 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6629
6630#undef BNX2X_PCI_FREE
6631#undef BNX2X_KFREE
6632}
6633
6634static int bnx2x_alloc_mem(struct bnx2x *bp)
6635{
6636
6637#define BNX2X_PCI_ALLOC(x, y, size) \
6638 do { \
6639 x = pci_alloc_consistent(bp->pdev, size, y); \
6640 if (x == NULL) \
6641 goto alloc_mem_err; \
6642 memset(x, 0, size); \
6643 } while (0)
6644
6645#define BNX2X_ALLOC(x, size) \
6646 do { \
6647 x = vmalloc(size); \
6648 if (x == NULL) \
6649 goto alloc_mem_err; \
6650 memset(x, 0, size); \
6651 } while (0)
6652
6653 int i;
6654
6655 /* fastpath */
555f6c78 6656 /* Common */
a2fbb9ea
ET
6657 for_each_queue(bp, i) {
6658 bnx2x_fp(bp, i, bp) = bp;
6659
555f6c78 6660 /* status blocks */
a2fbb9ea
ET
6661 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6662 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6663 sizeof(struct host_status_block));
555f6c78
EG
6664 }
6665 /* Rx */
6666 for_each_rx_queue(bp, i) {
a2fbb9ea 6667
555f6c78 6668 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6669 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6670 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6671 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6672 &bnx2x_fp(bp, i, rx_desc_mapping),
6673 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6674
6675 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6676 &bnx2x_fp(bp, i, rx_comp_mapping),
6677 sizeof(struct eth_fast_path_rx_cqe) *
6678 NUM_RCQ_BD);
6679
7a9b2557
VZ
6680 /* SGE ring */
6681 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6682 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6683 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6684 &bnx2x_fp(bp, i, rx_sge_mapping),
6685 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6686 }
555f6c78
EG
6687 /* Tx */
6688 for_each_tx_queue(bp, i) {
6689
555f6c78
EG
6690 /* fastpath tx rings: tx_buf tx_desc */
6691 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6692 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6693 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6694 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6695 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6696 }
a2fbb9ea
ET
6697 /* end of fastpath */
6698
6699 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6700 sizeof(struct host_def_status_block));
6701
6702 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6703 sizeof(struct bnx2x_slowpath));
6704
6705#ifdef BCM_ISCSI
6706 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6707
6708 /* Initialize T1 */
6709 for (i = 0; i < 64*1024; i += 64) {
6710 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6711 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6712 }
6713
6714 /* allocate searcher T2 table
6715 we allocate 1/4 of alloc num for T2
6716 (which is not entered into the ILT) */
6717 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6718
6719 /* Initialize T2 */
6720 for (i = 0; i < 16*1024; i += 64)
6721 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6722
c14423fe 6723 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6724 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6725
6726 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6727 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6728
6729 /* QM queues (128*MAX_CONN) */
6730 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6731#endif
6732
6733 /* Slow path ring */
6734 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6735
6736 return 0;
6737
6738alloc_mem_err:
6739 bnx2x_free_mem(bp);
6740 return -ENOMEM;
6741
6742#undef BNX2X_PCI_ALLOC
6743#undef BNX2X_ALLOC
6744}
6745
6746static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6747{
6748 int i;
6749
555f6c78 6750 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6751 struct bnx2x_fastpath *fp = &bp->fp[i];
6752
6753 u16 bd_cons = fp->tx_bd_cons;
6754 u16 sw_prod = fp->tx_pkt_prod;
6755 u16 sw_cons = fp->tx_pkt_cons;
6756
a2fbb9ea
ET
6757 while (sw_cons != sw_prod) {
6758 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6759 sw_cons++;
6760 }
6761 }
6762}
6763
6764static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6765{
6766 int i, j;
6767
555f6c78 6768 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6769 struct bnx2x_fastpath *fp = &bp->fp[j];
6770
a2fbb9ea
ET
6771 for (i = 0; i < NUM_RX_BD; i++) {
6772 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6773 struct sk_buff *skb = rx_buf->skb;
6774
6775 if (skb == NULL)
6776 continue;
6777
6778 pci_unmap_single(bp->pdev,
6779 pci_unmap_addr(rx_buf, mapping),
356e2385 6780 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
6781
6782 rx_buf->skb = NULL;
6783 dev_kfree_skb(skb);
6784 }
7a9b2557 6785 if (!fp->disable_tpa)
32626230
EG
6786 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6787 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6788 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6789 }
6790}
6791
6792static void bnx2x_free_skbs(struct bnx2x *bp)
6793{
6794 bnx2x_free_tx_skbs(bp);
6795 bnx2x_free_rx_skbs(bp);
6796}
6797
6798static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6799{
34f80b04 6800 int i, offset = 1;
a2fbb9ea
ET
6801
6802 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6803 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6804 bp->msix_table[0].vector);
6805
6806 for_each_queue(bp, i) {
c14423fe 6807 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6808 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6809 bnx2x_fp(bp, i, state));
6810
34f80b04 6811 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6812 }
a2fbb9ea
ET
6813}
6814
6815static void bnx2x_free_irq(struct bnx2x *bp)
6816{
a2fbb9ea 6817 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6818 bnx2x_free_msix_irqs(bp);
6819 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6820 bp->flags &= ~USING_MSIX_FLAG;
6821
8badd27a
EG
6822 } else if (bp->flags & USING_MSI_FLAG) {
6823 free_irq(bp->pdev->irq, bp->dev);
6824 pci_disable_msi(bp->pdev);
6825 bp->flags &= ~USING_MSI_FLAG;
6826
a2fbb9ea
ET
6827 } else
6828 free_irq(bp->pdev->irq, bp->dev);
6829}
6830
6831static int bnx2x_enable_msix(struct bnx2x *bp)
6832{
8badd27a
EG
6833 int i, rc, offset = 1;
6834 int igu_vec = 0;
a2fbb9ea 6835
8badd27a
EG
6836 bp->msix_table[0].entry = igu_vec;
6837 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6838
34f80b04 6839 for_each_queue(bp, i) {
8badd27a 6840 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6841 bp->msix_table[i + offset].entry = igu_vec;
6842 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6843 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6844 }
6845
34f80b04 6846 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6847 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6848 if (rc) {
8badd27a
EG
6849 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6850 return rc;
34f80b04 6851 }
8badd27a 6852
a2fbb9ea
ET
6853 bp->flags |= USING_MSIX_FLAG;
6854
6855 return 0;
a2fbb9ea
ET
6856}
6857
a2fbb9ea
ET
6858static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6859{
34f80b04 6860 int i, rc, offset = 1;
a2fbb9ea 6861
a2fbb9ea
ET
6862 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6863 bp->dev->name, bp->dev);
a2fbb9ea
ET
6864 if (rc) {
6865 BNX2X_ERR("request sp irq failed\n");
6866 return -EBUSY;
6867 }
6868
6869 for_each_queue(bp, i) {
555f6c78
EG
6870 struct bnx2x_fastpath *fp = &bp->fp[i];
6871
ca00392c
EG
6872 if (i < bp->num_rx_queues)
6873 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6874 else
6875 sprintf(fp->name, "%s-tx-%d",
6876 bp->dev->name, i - bp->num_rx_queues);
6877
34f80b04 6878 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6879 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6880 if (rc) {
555f6c78 6881 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6882 bnx2x_free_msix_irqs(bp);
6883 return -EBUSY;
6884 }
6885
555f6c78 6886 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6887 }
6888
555f6c78 6889 i = BNX2X_NUM_QUEUES(bp);
ca00392c
EG
6890 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6891 " ... fp[%d] %d\n",
6892 bp->dev->name, bp->msix_table[0].vector,
6893 0, bp->msix_table[offset].vector,
6894 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 6895
a2fbb9ea 6896 return 0;
a2fbb9ea
ET
6897}
6898
8badd27a
EG
6899static int bnx2x_enable_msi(struct bnx2x *bp)
6900{
6901 int rc;
6902
6903 rc = pci_enable_msi(bp->pdev);
6904 if (rc) {
6905 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6906 return -1;
6907 }
6908 bp->flags |= USING_MSI_FLAG;
6909
6910 return 0;
6911}
6912
a2fbb9ea
ET
6913static int bnx2x_req_irq(struct bnx2x *bp)
6914{
8badd27a 6915 unsigned long flags;
34f80b04 6916 int rc;
a2fbb9ea 6917
8badd27a
EG
6918 if (bp->flags & USING_MSI_FLAG)
6919 flags = 0;
6920 else
6921 flags = IRQF_SHARED;
6922
6923 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6924 bp->dev->name, bp->dev);
a2fbb9ea
ET
6925 if (!rc)
6926 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6927
6928 return rc;
a2fbb9ea
ET
6929}
6930
65abd74d
YG
6931static void bnx2x_napi_enable(struct bnx2x *bp)
6932{
6933 int i;
6934
555f6c78 6935 for_each_rx_queue(bp, i)
65abd74d
YG
6936 napi_enable(&bnx2x_fp(bp, i, napi));
6937}
6938
6939static void bnx2x_napi_disable(struct bnx2x *bp)
6940{
6941 int i;
6942
555f6c78 6943 for_each_rx_queue(bp, i)
65abd74d
YG
6944 napi_disable(&bnx2x_fp(bp, i, napi));
6945}
6946
6947static void bnx2x_netif_start(struct bnx2x *bp)
6948{
e1510706
EG
6949 int intr_sem;
6950
6951 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6952 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6953
6954 if (intr_sem) {
65abd74d 6955 if (netif_running(bp->dev)) {
65abd74d
YG
6956 bnx2x_napi_enable(bp);
6957 bnx2x_int_enable(bp);
555f6c78
EG
6958 if (bp->state == BNX2X_STATE_OPEN)
6959 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6960 }
6961 }
6962}
6963
f8ef6e44 6964static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6965{
f8ef6e44 6966 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6967 bnx2x_napi_disable(bp);
762d5f6c
EG
6968 netif_tx_disable(bp->dev);
6969 bp->dev->trans_start = jiffies; /* prevent tx timeout */
65abd74d
YG
6970}
6971
a2fbb9ea
ET
6972/*
6973 * Init service functions
6974 */
6975
3101c2bc 6976static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6977{
6978 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6979 int port = BP_PORT(bp);
a2fbb9ea
ET
6980
6981 /* CAM allocation
6982 * unicasts 0-31:port0 32-63:port1
6983 * multicast 64-127:port0 128-191:port1
6984 */
8d9c5f34 6985 config->hdr.length = 2;
af246401 6986 config->hdr.offset = port ? 32 : 0;
0626b899 6987 config->hdr.client_id = bp->fp->cl_id;
a2fbb9ea
ET
6988 config->hdr.reserved1 = 0;
6989
6990 /* primary MAC */
6991 config->config_table[0].cam_entry.msb_mac_addr =
6992 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6993 config->config_table[0].cam_entry.middle_mac_addr =
6994 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6995 config->config_table[0].cam_entry.lsb_mac_addr =
6996 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6997 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6998 if (set)
6999 config->config_table[0].target_table_entry.flags = 0;
7000 else
7001 CAM_INVALIDATE(config->config_table[0]);
ca00392c
EG
7002 config->config_table[0].target_table_entry.clients_bit_vector =
7003 cpu_to_le32(1 << BP_L_ID(bp));
a2fbb9ea
ET
7004 config->config_table[0].target_table_entry.vlan_id = 0;
7005
3101c2bc
YG
7006 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7007 (set ? "setting" : "clearing"),
a2fbb9ea
ET
7008 config->config_table[0].cam_entry.msb_mac_addr,
7009 config->config_table[0].cam_entry.middle_mac_addr,
7010 config->config_table[0].cam_entry.lsb_mac_addr);
7011
7012 /* broadcast */
4781bfad
EG
7013 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7014 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7015 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 7016 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
7017 if (set)
7018 config->config_table[1].target_table_entry.flags =
a2fbb9ea 7019 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
7020 else
7021 CAM_INVALIDATE(config->config_table[1]);
ca00392c
EG
7022 config->config_table[1].target_table_entry.clients_bit_vector =
7023 cpu_to_le32(1 << BP_L_ID(bp));
a2fbb9ea
ET
7024 config->config_table[1].target_table_entry.vlan_id = 0;
7025
7026 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7027 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7028 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7029}
7030
3101c2bc 7031static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
7032{
7033 struct mac_configuration_cmd_e1h *config =
7034 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7035
34f80b04
EG
7036 /* CAM allocation for E1H
7037 * unicasts: by func number
7038 * multicast: 20+FUNC*20, 20 each
7039 */
8d9c5f34 7040 config->hdr.length = 1;
34f80b04 7041 config->hdr.offset = BP_FUNC(bp);
0626b899 7042 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
7043 config->hdr.reserved1 = 0;
7044
7045 /* primary MAC */
7046 config->config_table[0].msb_mac_addr =
7047 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7048 config->config_table[0].middle_mac_addr =
7049 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7050 config->config_table[0].lsb_mac_addr =
7051 swab16(*(u16 *)&bp->dev->dev_addr[4]);
ca00392c
EG
7052 config->config_table[0].clients_bit_vector =
7053 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
7054 config->config_table[0].vlan_id = 0;
7055 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
7056 if (set)
7057 config->config_table[0].flags = BP_PORT(bp);
7058 else
7059 config->config_table[0].flags =
7060 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7061
3101c2bc
YG
7062 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
7063 (set ? "setting" : "clearing"),
34f80b04
EG
7064 config->config_table[0].msb_mac_addr,
7065 config->config_table[0].middle_mac_addr,
7066 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7067
7068 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7069 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7070 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7071}
7072
a2fbb9ea
ET
7073static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7074 int *state_p, int poll)
7075{
7076 /* can take a while if any port is running */
8b3a0f0b 7077 int cnt = 5000;
a2fbb9ea 7078
c14423fe
ET
7079 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7080 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
7081
7082 might_sleep();
34f80b04 7083 while (cnt--) {
a2fbb9ea
ET
7084 if (poll) {
7085 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
7086 /* if index is different from 0
7087 * the reply for some commands will
3101c2bc 7088 * be on the non default queue
a2fbb9ea
ET
7089 */
7090 if (idx)
7091 bnx2x_rx_int(&bp->fp[idx], 10);
7092 }
a2fbb9ea 7093
3101c2bc 7094 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
7095 if (*state_p == state) {
7096#ifdef BNX2X_STOP_ON_ERROR
7097 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7098#endif
a2fbb9ea 7099 return 0;
8b3a0f0b 7100 }
a2fbb9ea 7101
a2fbb9ea 7102 msleep(1);
a2fbb9ea
ET
7103 }
7104
a2fbb9ea 7105 /* timeout! */
49d66772
ET
7106 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7107 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
7108#ifdef BNX2X_STOP_ON_ERROR
7109 bnx2x_panic();
7110#endif
a2fbb9ea 7111
49d66772 7112 return -EBUSY;
a2fbb9ea
ET
7113}
7114
7115static int bnx2x_setup_leading(struct bnx2x *bp)
7116{
34f80b04 7117 int rc;
a2fbb9ea 7118
c14423fe 7119 /* reset IGU state */
34f80b04 7120 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
7121
7122 /* SETUP ramrod */
7123 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7124
34f80b04
EG
7125 /* Wait for completion */
7126 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7127
34f80b04 7128 return rc;
a2fbb9ea
ET
7129}
7130
7131static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7132{
555f6c78
EG
7133 struct bnx2x_fastpath *fp = &bp->fp[index];
7134
a2fbb9ea 7135 /* reset IGU state */
555f6c78 7136 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7137
228241eb 7138 /* SETUP ramrod */
555f6c78
EG
7139 fp->state = BNX2X_FP_STATE_OPENING;
7140 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7141 fp->cl_id, 0);
a2fbb9ea
ET
7142
7143 /* Wait for completion */
7144 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7145 &(fp->state), 0);
a2fbb9ea
ET
7146}
7147
a2fbb9ea 7148static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7149
ca00392c
EG
7150static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7151 int *num_tx_queues_out)
7152{
7153 int _num_rx_queues = 0, _num_tx_queues = 0;
7154
7155 switch (bp->multi_mode) {
7156 case ETH_RSS_MODE_DISABLED:
7157 _num_rx_queues = 1;
7158 _num_tx_queues = 1;
7159 break;
7160
7161 case ETH_RSS_MODE_REGULAR:
7162 if (num_rx_queues)
7163 _num_rx_queues = min_t(u32, num_rx_queues,
7164 BNX2X_MAX_QUEUES(bp));
7165 else
7166 _num_rx_queues = min_t(u32, num_online_cpus(),
7167 BNX2X_MAX_QUEUES(bp));
7168
7169 if (num_tx_queues)
7170 _num_tx_queues = min_t(u32, num_tx_queues,
7171 BNX2X_MAX_QUEUES(bp));
7172 else
7173 _num_tx_queues = min_t(u32, num_online_cpus(),
7174 BNX2X_MAX_QUEUES(bp));
7175
7176 /* There must be not more Tx queues than Rx queues */
7177 if (_num_tx_queues > _num_rx_queues) {
7178 BNX2X_ERR("number of tx queues (%d) > "
7179 "number of rx queues (%d)"
7180 " defaulting to %d\n",
7181 _num_tx_queues, _num_rx_queues,
7182 _num_rx_queues);
7183 _num_tx_queues = _num_rx_queues;
7184 }
7185 break;
7186
7187
7188 default:
7189 _num_rx_queues = 1;
7190 _num_tx_queues = 1;
7191 break;
7192 }
7193
7194 *num_rx_queues_out = _num_rx_queues;
7195 *num_tx_queues_out = _num_tx_queues;
7196}
7197
7198static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7199{
ca00392c 7200 int rc = 0;
a2fbb9ea 7201
8badd27a
EG
7202 switch (int_mode) {
7203 case INT_MODE_INTx:
7204 case INT_MODE_MSI:
ca00392c
EG
7205 bp->num_rx_queues = 1;
7206 bp->num_tx_queues = 1;
7207 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
8badd27a
EG
7208 break;
7209
7210 case INT_MODE_MSIX:
7211 default:
ca00392c
EG
7212 /* Set interrupt mode according to bp->multi_mode value */
7213 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7214 &bp->num_tx_queues);
7215
7216 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7217 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7218
2dfe0e1f
EG
7219 /* if we can't use MSI-X we only need one fp,
7220 * so try to enable MSI-X with the requested number of fp's
7221 * and fallback to MSI or legacy INTx with one fp
7222 */
ca00392c
EG
7223 rc = bnx2x_enable_msix(bp);
7224 if (rc) {
34f80b04 7225 /* failed to enable MSI-X */
555f6c78
EG
7226 if (bp->multi_mode)
7227 BNX2X_ERR("Multi requested but failed to "
ca00392c
EG
7228 "enable MSI-X (rx %d tx %d), "
7229 "set number of queues to 1\n",
7230 bp->num_rx_queues, bp->num_tx_queues);
7231 bp->num_rx_queues = 1;
7232 bp->num_tx_queues = 1;
a2fbb9ea 7233 }
8badd27a 7234 break;
a2fbb9ea 7235 }
555f6c78 7236 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7237 return rc;
8badd27a
EG
7238}
7239
8badd27a
EG
7240
7241/* must be called with rtnl_lock */
7242static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7243{
7244 u32 load_code;
ca00392c
EG
7245 int i, rc;
7246
8badd27a 7247#ifdef BNX2X_STOP_ON_ERROR
8badd27a
EG
7248 if (unlikely(bp->panic))
7249 return -EPERM;
7250#endif
7251
7252 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7253
ca00392c 7254 rc = bnx2x_set_int_mode(bp);
c14423fe 7255
a2fbb9ea
ET
7256 if (bnx2x_alloc_mem(bp))
7257 return -ENOMEM;
7258
555f6c78 7259 for_each_rx_queue(bp, i)
7a9b2557
VZ
7260 bnx2x_fp(bp, i, disable_tpa) =
7261 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7262
555f6c78 7263 for_each_rx_queue(bp, i)
2dfe0e1f
EG
7264 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7265 bnx2x_poll, 128);
7266
2dfe0e1f
EG
7267 bnx2x_napi_enable(bp);
7268
34f80b04
EG
7269 if (bp->flags & USING_MSIX_FLAG) {
7270 rc = bnx2x_req_msix_irqs(bp);
7271 if (rc) {
7272 pci_disable_msix(bp->pdev);
2dfe0e1f 7273 goto load_error1;
34f80b04
EG
7274 }
7275 } else {
ca00392c
EG
7276 /* Fall to INTx if failed to enable MSI-X due to lack of
7277 memory (in bnx2x_set_int_mode()) */
8badd27a
EG
7278 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7279 bnx2x_enable_msi(bp);
34f80b04
EG
7280 bnx2x_ack_int(bp);
7281 rc = bnx2x_req_irq(bp);
7282 if (rc) {
2dfe0e1f 7283 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
7284 if (bp->flags & USING_MSI_FLAG)
7285 pci_disable_msi(bp->pdev);
2dfe0e1f 7286 goto load_error1;
a2fbb9ea 7287 }
8badd27a
EG
7288 if (bp->flags & USING_MSI_FLAG) {
7289 bp->dev->irq = bp->pdev->irq;
7290 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7291 bp->dev->name, bp->pdev->irq);
7292 }
a2fbb9ea
ET
7293 }
7294
2dfe0e1f
EG
7295 /* Send LOAD_REQUEST command to MCP
7296 Returns the type of LOAD command:
7297 if it is the first port to be initialized
7298 common blocks should be initialized, otherwise - not
7299 */
7300 if (!BP_NOMCP(bp)) {
7301 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7302 if (!load_code) {
7303 BNX2X_ERR("MCP response failure, aborting\n");
7304 rc = -EBUSY;
7305 goto load_error2;
7306 }
7307 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7308 rc = -EBUSY; /* other port in diagnostic mode */
7309 goto load_error2;
7310 }
7311
7312 } else {
7313 int port = BP_PORT(bp);
7314
f5372251 7315 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
2dfe0e1f
EG
7316 load_count[0], load_count[1], load_count[2]);
7317 load_count[0]++;
7318 load_count[1 + port]++;
f5372251 7319 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
2dfe0e1f
EG
7320 load_count[0], load_count[1], load_count[2]);
7321 if (load_count[0] == 1)
7322 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7323 else if (load_count[1 + port] == 1)
7324 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7325 else
7326 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7327 }
7328
7329 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7330 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7331 bp->port.pmf = 1;
7332 else
7333 bp->port.pmf = 0;
7334 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7335
a2fbb9ea 7336 /* Initialize HW */
34f80b04
EG
7337 rc = bnx2x_init_hw(bp, load_code);
7338 if (rc) {
a2fbb9ea 7339 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7340 goto load_error2;
a2fbb9ea
ET
7341 }
7342
a2fbb9ea 7343 /* Setup NIC internals and enable interrupts */
471de716 7344 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7345
2691d51d
EG
7346 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7347 (bp->common.shmem2_base))
7348 SHMEM2_WR(bp, dcc_support,
7349 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7350 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7351
a2fbb9ea 7352 /* Send LOAD_DONE command to MCP */
34f80b04 7353 if (!BP_NOMCP(bp)) {
228241eb
ET
7354 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7355 if (!load_code) {
da5a662a 7356 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7357 rc = -EBUSY;
2dfe0e1f 7358 goto load_error3;
a2fbb9ea
ET
7359 }
7360 }
7361
7362 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7363
34f80b04
EG
7364 rc = bnx2x_setup_leading(bp);
7365 if (rc) {
da5a662a 7366 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 7367 goto load_error3;
34f80b04 7368 }
a2fbb9ea 7369
34f80b04
EG
7370 if (CHIP_IS_E1H(bp))
7371 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7372 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
34f80b04
EG
7373 bp->state = BNX2X_STATE_DISABLED;
7374 }
a2fbb9ea 7375
ca00392c 7376 if (bp->state == BNX2X_STATE_OPEN) {
34f80b04
EG
7377 for_each_nondefault_queue(bp, i) {
7378 rc = bnx2x_setup_multi(bp, i);
7379 if (rc)
2dfe0e1f 7380 goto load_error3;
34f80b04 7381 }
a2fbb9ea 7382
ca00392c
EG
7383 if (CHIP_IS_E1(bp))
7384 bnx2x_set_mac_addr_e1(bp, 1);
7385 else
7386 bnx2x_set_mac_addr_e1h(bp, 1);
7387 }
34f80b04
EG
7388
7389 if (bp->port.pmf)
b5bf9068 7390 bnx2x_initial_phy_init(bp, load_mode);
a2fbb9ea
ET
7391
7392 /* Start fast path */
34f80b04
EG
7393 switch (load_mode) {
7394 case LOAD_NORMAL:
ca00392c
EG
7395 if (bp->state == BNX2X_STATE_OPEN) {
7396 /* Tx queue should be only reenabled */
7397 netif_tx_wake_all_queues(bp->dev);
7398 }
2dfe0e1f 7399 /* Initialize the receive filter. */
34f80b04
EG
7400 bnx2x_set_rx_mode(bp->dev);
7401 break;
7402
7403 case LOAD_OPEN:
555f6c78 7404 netif_tx_start_all_queues(bp->dev);
ca00392c
EG
7405 if (bp->state != BNX2X_STATE_OPEN)
7406 netif_tx_disable(bp->dev);
2dfe0e1f 7407 /* Initialize the receive filter. */
34f80b04 7408 bnx2x_set_rx_mode(bp->dev);
34f80b04 7409 break;
a2fbb9ea 7410
34f80b04 7411 case LOAD_DIAG:
2dfe0e1f 7412 /* Initialize the receive filter. */
a2fbb9ea 7413 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
7414 bp->state = BNX2X_STATE_DIAG;
7415 break;
7416
7417 default:
7418 break;
a2fbb9ea
ET
7419 }
7420
34f80b04
EG
7421 if (!bp->port.pmf)
7422 bnx2x__link_status_update(bp);
7423
a2fbb9ea
ET
7424 /* start the timer */
7425 mod_timer(&bp->timer, jiffies + bp->current_interval);
7426
34f80b04 7427
a2fbb9ea
ET
7428 return 0;
7429
2dfe0e1f
EG
7430load_error3:
7431 bnx2x_int_disable_sync(bp, 1);
7432 if (!BP_NOMCP(bp)) {
7433 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7434 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7435 }
7436 bp->port.pmf = 0;
7a9b2557
VZ
7437 /* Free SKBs, SGEs, TPA pool and driver internals */
7438 bnx2x_free_skbs(bp);
555f6c78 7439 for_each_rx_queue(bp, i)
3196a88a 7440 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7441load_error2:
d1014634
YG
7442 /* Release IRQs */
7443 bnx2x_free_irq(bp);
2dfe0e1f
EG
7444load_error1:
7445 bnx2x_napi_disable(bp);
555f6c78 7446 for_each_rx_queue(bp, i)
7cde1c8b 7447 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7448 bnx2x_free_mem(bp);
7449
34f80b04 7450 return rc;
a2fbb9ea
ET
7451}
7452
7453static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7454{
555f6c78 7455 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7456 int rc;
7457
c14423fe 7458 /* halt the connection */
555f6c78
EG
7459 fp->state = BNX2X_FP_STATE_HALTING;
7460 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7461
34f80b04 7462 /* Wait for completion */
a2fbb9ea 7463 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7464 &(fp->state), 1);
c14423fe 7465 if (rc) /* timeout */
a2fbb9ea
ET
7466 return rc;
7467
7468 /* delete cfc entry */
7469 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7470
34f80b04
EG
7471 /* Wait for completion */
7472 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7473 &(fp->state), 1);
34f80b04 7474 return rc;
a2fbb9ea
ET
7475}
7476
da5a662a 7477static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7478{
4781bfad 7479 __le16 dsb_sp_prod_idx;
c14423fe 7480 /* if the other port is handling traffic,
a2fbb9ea 7481 this can take a lot of time */
34f80b04
EG
7482 int cnt = 500;
7483 int rc;
a2fbb9ea
ET
7484
7485 might_sleep();
7486
7487 /* Send HALT ramrod */
7488 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7489 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7490
34f80b04
EG
7491 /* Wait for completion */
7492 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7493 &(bp->fp[0].state), 1);
7494 if (rc) /* timeout */
da5a662a 7495 return rc;
a2fbb9ea 7496
49d66772 7497 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7498
228241eb 7499 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7500 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7501
49d66772 7502 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7503 we are going to reset the chip anyway
7504 so there is not much to do if this times out
7505 */
34f80b04 7506 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7507 if (!cnt) {
7508 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7509 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7510 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7511#ifdef BNX2X_STOP_ON_ERROR
7512 bnx2x_panic();
7513#endif
36e552ab 7514 rc = -EBUSY;
34f80b04
EG
7515 break;
7516 }
7517 cnt--;
da5a662a 7518 msleep(1);
5650d9d4 7519 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7520 }
7521 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7522 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7523
7524 return rc;
a2fbb9ea
ET
7525}
7526
34f80b04
EG
7527static void bnx2x_reset_func(struct bnx2x *bp)
7528{
7529 int port = BP_PORT(bp);
7530 int func = BP_FUNC(bp);
7531 int base, i;
7532
7533 /* Configure IGU */
7534 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7535 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7536
34f80b04
EG
7537 /* Clear ILT */
7538 base = FUNC_ILT_BASE(func);
7539 for (i = base; i < base + ILT_PER_FUNC; i++)
7540 bnx2x_ilt_wr(bp, i, 0);
7541}
7542
7543static void bnx2x_reset_port(struct bnx2x *bp)
7544{
7545 int port = BP_PORT(bp);
7546 u32 val;
7547
7548 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7549
7550 /* Do not rcv packets to BRB */
7551 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7552 /* Do not direct rcv packets that are not for MCP to the BRB */
7553 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7554 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7555
7556 /* Configure AEU */
7557 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7558
7559 msleep(100);
7560 /* Check for BRB port occupancy */
7561 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7562 if (val)
7563 DP(NETIF_MSG_IFDOWN,
33471629 7564 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7565
7566 /* TODO: Close Doorbell port? */
7567}
7568
34f80b04
EG
7569static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7570{
7571 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7572 BP_FUNC(bp), reset_code);
7573
7574 switch (reset_code) {
7575 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7576 bnx2x_reset_port(bp);
7577 bnx2x_reset_func(bp);
7578 bnx2x_reset_common(bp);
7579 break;
7580
7581 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7582 bnx2x_reset_port(bp);
7583 bnx2x_reset_func(bp);
7584 break;
7585
7586 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7587 bnx2x_reset_func(bp);
7588 break;
49d66772 7589
34f80b04
EG
7590 default:
7591 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7592 break;
7593 }
7594}
7595
33471629 7596/* must be called with rtnl_lock */
34f80b04 7597static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7598{
da5a662a 7599 int port = BP_PORT(bp);
a2fbb9ea 7600 u32 reset_code = 0;
da5a662a 7601 int i, cnt, rc;
a2fbb9ea
ET
7602
7603 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7604
228241eb
ET
7605 bp->rx_mode = BNX2X_RX_MODE_NONE;
7606 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7607
f8ef6e44 7608 bnx2x_netif_stop(bp, 1);
e94d8af3 7609
34f80b04
EG
7610 del_timer_sync(&bp->timer);
7611 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7612 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7613 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7614
70b9986c
EG
7615 /* Release IRQs */
7616 bnx2x_free_irq(bp);
7617
555f6c78
EG
7618 /* Wait until tx fastpath tasks complete */
7619 for_each_tx_queue(bp, i) {
228241eb
ET
7620 struct bnx2x_fastpath *fp = &bp->fp[i];
7621
34f80b04 7622 cnt = 1000;
e8b5fc51 7623 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7624
7961f791 7625 bnx2x_tx_int(fp);
34f80b04
EG
7626 if (!cnt) {
7627 BNX2X_ERR("timeout waiting for queue[%d]\n",
7628 i);
7629#ifdef BNX2X_STOP_ON_ERROR
7630 bnx2x_panic();
7631 return -EBUSY;
7632#else
7633 break;
7634#endif
7635 }
7636 cnt--;
da5a662a 7637 msleep(1);
34f80b04 7638 }
228241eb 7639 }
da5a662a
VZ
7640 /* Give HW time to discard old tx messages */
7641 msleep(1);
a2fbb9ea 7642
3101c2bc
YG
7643 if (CHIP_IS_E1(bp)) {
7644 struct mac_configuration_cmd *config =
7645 bnx2x_sp(bp, mcast_config);
7646
7647 bnx2x_set_mac_addr_e1(bp, 0);
7648
8d9c5f34 7649 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7650 CAM_INVALIDATE(config->config_table[i]);
7651
8d9c5f34 7652 config->hdr.length = i;
3101c2bc
YG
7653 if (CHIP_REV_IS_SLOW(bp))
7654 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7655 else
7656 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7657 config->hdr.client_id = bp->fp->cl_id;
3101c2bc
YG
7658 config->hdr.reserved1 = 0;
7659
7660 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7661 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7662 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7663
7664 } else { /* E1H */
65abd74d
YG
7665 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7666
3101c2bc
YG
7667 bnx2x_set_mac_addr_e1h(bp, 0);
7668
7669 for (i = 0; i < MC_HASH_SIZE; i++)
7670 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7d0446c2
EG
7671
7672 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
3101c2bc
YG
7673 }
7674
65abd74d
YG
7675 if (unload_mode == UNLOAD_NORMAL)
7676 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7677
7d0446c2 7678 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7679 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7680
7d0446c2 7681 else if (bp->wol) {
65abd74d
YG
7682 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7683 u8 *mac_addr = bp->dev->dev_addr;
7684 u32 val;
7685 /* The mac address is written to entries 1-4 to
7686 preserve entry 0 which is used by the PMF */
7687 u8 entry = (BP_E1HVN(bp) + 1)*8;
7688
7689 val = (mac_addr[0] << 8) | mac_addr[1];
7690 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7691
7692 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7693 (mac_addr[4] << 8) | mac_addr[5];
7694 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7695
7696 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7697
7698 } else
7699 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7700
34f80b04
EG
7701 /* Close multi and leading connections
7702 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7703 for_each_nondefault_queue(bp, i)
7704 if (bnx2x_stop_multi(bp, i))
228241eb 7705 goto unload_error;
a2fbb9ea 7706
da5a662a
VZ
7707 rc = bnx2x_stop_leading(bp);
7708 if (rc) {
34f80b04 7709 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7710#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7711 return -EBUSY;
da5a662a
VZ
7712#else
7713 goto unload_error;
34f80b04 7714#endif
228241eb
ET
7715 }
7716
7717unload_error:
34f80b04 7718 if (!BP_NOMCP(bp))
228241eb 7719 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7720 else {
f5372251 7721 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
34f80b04
EG
7722 load_count[0], load_count[1], load_count[2]);
7723 load_count[0]--;
da5a662a 7724 load_count[1 + port]--;
f5372251 7725 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
34f80b04
EG
7726 load_count[0], load_count[1], load_count[2]);
7727 if (load_count[0] == 0)
7728 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7729 else if (load_count[1 + port] == 0)
34f80b04
EG
7730 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7731 else
7732 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7733 }
a2fbb9ea 7734
34f80b04
EG
7735 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7736 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7737 bnx2x__link_reset(bp);
a2fbb9ea
ET
7738
7739 /* Reset the chip */
228241eb 7740 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7741
7742 /* Report UNLOAD_DONE to MCP */
34f80b04 7743 if (!BP_NOMCP(bp))
a2fbb9ea 7744 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7745
9a035440 7746 bp->port.pmf = 0;
a2fbb9ea 7747
7a9b2557 7748 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7749 bnx2x_free_skbs(bp);
555f6c78 7750 for_each_rx_queue(bp, i)
3196a88a 7751 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7752 for_each_rx_queue(bp, i)
7cde1c8b 7753 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7754 bnx2x_free_mem(bp);
7755
7756 bp->state = BNX2X_STATE_CLOSED;
228241eb 7757
a2fbb9ea
ET
7758 netif_carrier_off(bp->dev);
7759
7760 return 0;
7761}
7762
34f80b04
EG
7763static void bnx2x_reset_task(struct work_struct *work)
7764{
7765 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7766
7767#ifdef BNX2X_STOP_ON_ERROR
7768 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7769 " so reset not done to allow debug dump,\n"
ad361c98 7770 " you will need to reboot when done\n");
34f80b04
EG
7771 return;
7772#endif
7773
7774 rtnl_lock();
7775
7776 if (!netif_running(bp->dev))
7777 goto reset_task_exit;
7778
7779 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7780 bnx2x_nic_load(bp, LOAD_NORMAL);
7781
7782reset_task_exit:
7783 rtnl_unlock();
7784}
7785
a2fbb9ea
ET
7786/* end of nic load/unload */
7787
7788/* ethtool_ops */
7789
7790/*
7791 * Init service functions
7792 */
7793
f1ef27ef
EG
7794static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7795{
7796 switch (func) {
7797 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7798 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7799 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7800 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7801 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7802 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7803 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7804 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7805 default:
7806 BNX2X_ERR("Unsupported function index: %d\n", func);
7807 return (u32)(-1);
7808 }
7809}
7810
7811static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7812{
7813 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7814
7815 /* Flush all outstanding writes */
7816 mmiowb();
7817
7818 /* Pretend to be function 0 */
7819 REG_WR(bp, reg, 0);
7820 /* Flush the GRC transaction (in the chip) */
7821 new_val = REG_RD(bp, reg);
7822 if (new_val != 0) {
7823 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7824 new_val);
7825 BUG();
7826 }
7827
7828 /* From now we are in the "like-E1" mode */
7829 bnx2x_int_disable(bp);
7830
7831 /* Flush all outstanding writes */
7832 mmiowb();
7833
7834 /* Restore the original funtion settings */
7835 REG_WR(bp, reg, orig_func);
7836 new_val = REG_RD(bp, reg);
7837 if (new_val != orig_func) {
7838 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7839 orig_func, new_val);
7840 BUG();
7841 }
7842}
7843
7844static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7845{
7846 if (CHIP_IS_E1H(bp))
7847 bnx2x_undi_int_disable_e1h(bp, func);
7848 else
7849 bnx2x_int_disable(bp);
7850}
7851
34f80b04
EG
7852static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7853{
7854 u32 val;
7855
7856 /* Check if there is any driver already loaded */
7857 val = REG_RD(bp, MISC_REG_UNPREPARED);
7858 if (val == 0x1) {
7859 /* Check if it is the UNDI driver
7860 * UNDI driver initializes CID offset for normal bell to 0x7
7861 */
4a37fb66 7862 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7863 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7864 if (val == 0x7) {
7865 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7866 /* save our func */
34f80b04 7867 int func = BP_FUNC(bp);
da5a662a
VZ
7868 u32 swap_en;
7869 u32 swap_val;
34f80b04 7870
b4661739
EG
7871 /* clear the UNDI indication */
7872 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7873
34f80b04
EG
7874 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7875
7876 /* try unload UNDI on port 0 */
7877 bp->func = 0;
da5a662a
VZ
7878 bp->fw_seq =
7879 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7880 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7881 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7882
7883 /* if UNDI is loaded on the other port */
7884 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7885
da5a662a
VZ
7886 /* send "DONE" for previous unload */
7887 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7888
7889 /* unload UNDI on port 1 */
34f80b04 7890 bp->func = 1;
da5a662a
VZ
7891 bp->fw_seq =
7892 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7893 DRV_MSG_SEQ_NUMBER_MASK);
7894 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7895
7896 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7897 }
7898
b4661739
EG
7899 /* now it's safe to release the lock */
7900 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7901
f1ef27ef 7902 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7903
7904 /* close input traffic and wait for it */
7905 /* Do not rcv packets to BRB */
7906 REG_WR(bp,
7907 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7908 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7909 /* Do not direct rcv packets that are not for MCP to
7910 * the BRB */
7911 REG_WR(bp,
7912 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7913 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7914 /* clear AEU */
7915 REG_WR(bp,
7916 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7917 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7918 msleep(10);
7919
7920 /* save NIG port swap info */
7921 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7922 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7923 /* reset device */
7924 REG_WR(bp,
7925 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7926 0xd3ffffff);
34f80b04
EG
7927 REG_WR(bp,
7928 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7929 0x1403);
da5a662a
VZ
7930 /* take the NIG out of reset and restore swap values */
7931 REG_WR(bp,
7932 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7933 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7934 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7935 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7936
7937 /* send unload done to the MCP */
7938 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7939
7940 /* restore our func and fw_seq */
7941 bp->func = func;
7942 bp->fw_seq =
7943 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7944 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7945
7946 } else
7947 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7948 }
7949}
7950
7951static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7952{
7953 u32 val, val2, val3, val4, id;
72ce58c3 7954 u16 pmc;
34f80b04
EG
7955
7956 /* Get the chip revision id and number. */
7957 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7958 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7959 id = ((val & 0xffff) << 16);
7960 val = REG_RD(bp, MISC_REG_CHIP_REV);
7961 id |= ((val & 0xf) << 12);
7962 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7963 id |= ((val & 0xff) << 4);
5a40e08e 7964 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7965 id |= (val & 0xf);
7966 bp->common.chip_id = id;
7967 bp->link_params.chip_id = bp->common.chip_id;
7968 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7969
1c06328c
EG
7970 val = (REG_RD(bp, 0x2874) & 0x55);
7971 if ((bp->common.chip_id & 0x1) ||
7972 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7973 bp->flags |= ONE_PORT_FLAG;
7974 BNX2X_DEV_INFO("single port device\n");
7975 }
7976
34f80b04
EG
7977 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7978 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7979 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7980 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7981 bp->common.flash_size, bp->common.flash_size);
7982
7983 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 7984 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 7985 bp->link_params.shmem_base = bp->common.shmem_base;
2691d51d
EG
7986 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7987 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04
EG
7988
7989 if (!bp->common.shmem_base ||
7990 (bp->common.shmem_base < 0xA0000) ||
7991 (bp->common.shmem_base >= 0xC0000)) {
7992 BNX2X_DEV_INFO("MCP not active\n");
7993 bp->flags |= NO_MCP_FLAG;
7994 return;
7995 }
7996
7997 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7998 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7999 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8000 BNX2X_ERR("BAD MCP validity signature\n");
8001
8002 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8003 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8004
8005 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8006 SHARED_HW_CFG_LED_MODE_MASK) >>
8007 SHARED_HW_CFG_LED_MODE_SHIFT);
8008
c2c8b03e
EG
8009 bp->link_params.feature_config_flags = 0;
8010 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8011 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8012 bp->link_params.feature_config_flags |=
8013 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8014 else
8015 bp->link_params.feature_config_flags &=
8016 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8017
34f80b04
EG
8018 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8019 bp->common.bc_ver = val;
8020 BNX2X_DEV_INFO("bc_ver %X\n", val);
8021 if (val < BNX2X_BC_VER) {
8022 /* for now only warn
8023 * later we might need to enforce this */
8024 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8025 " please upgrade BC\n", BNX2X_BC_VER, val);
8026 }
4d295db0
EG
8027 bp->link_params.feature_config_flags |=
8028 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8029 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
72ce58c3
EG
8030
8031 if (BP_E1HVN(bp) == 0) {
8032 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8033 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8034 } else {
8035 /* no WOL capability for E1HVN != 0 */
8036 bp->flags |= NO_WOL_FLAG;
8037 }
8038 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8039 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8040
8041 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8042 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8043 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8044 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8045
8046 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8047 val, val2, val3, val4);
8048}
8049
8050static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8051 u32 switch_cfg)
a2fbb9ea 8052{
34f80b04 8053 int port = BP_PORT(bp);
a2fbb9ea
ET
8054 u32 ext_phy_type;
8055
a2fbb9ea
ET
8056 switch (switch_cfg) {
8057 case SWITCH_CFG_1G:
8058 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8059
c18487ee
YR
8060 ext_phy_type =
8061 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8062 switch (ext_phy_type) {
8063 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8064 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8065 ext_phy_type);
8066
34f80b04
EG
8067 bp->port.supported |= (SUPPORTED_10baseT_Half |
8068 SUPPORTED_10baseT_Full |
8069 SUPPORTED_100baseT_Half |
8070 SUPPORTED_100baseT_Full |
8071 SUPPORTED_1000baseT_Full |
8072 SUPPORTED_2500baseX_Full |
8073 SUPPORTED_TP |
8074 SUPPORTED_FIBRE |
8075 SUPPORTED_Autoneg |
8076 SUPPORTED_Pause |
8077 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8078 break;
8079
8080 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8081 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8082 ext_phy_type);
8083
34f80b04
EG
8084 bp->port.supported |= (SUPPORTED_10baseT_Half |
8085 SUPPORTED_10baseT_Full |
8086 SUPPORTED_100baseT_Half |
8087 SUPPORTED_100baseT_Full |
8088 SUPPORTED_1000baseT_Full |
8089 SUPPORTED_TP |
8090 SUPPORTED_FIBRE |
8091 SUPPORTED_Autoneg |
8092 SUPPORTED_Pause |
8093 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8094 break;
8095
8096 default:
8097 BNX2X_ERR("NVRAM config error. "
8098 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8099 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8100 return;
8101 }
8102
34f80b04
EG
8103 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8104 port*0x10);
8105 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8106 break;
8107
8108 case SWITCH_CFG_10G:
8109 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8110
c18487ee
YR
8111 ext_phy_type =
8112 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
8113 switch (ext_phy_type) {
8114 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8115 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8116 ext_phy_type);
8117
34f80b04
EG
8118 bp->port.supported |= (SUPPORTED_10baseT_Half |
8119 SUPPORTED_10baseT_Full |
8120 SUPPORTED_100baseT_Half |
8121 SUPPORTED_100baseT_Full |
8122 SUPPORTED_1000baseT_Full |
8123 SUPPORTED_2500baseX_Full |
8124 SUPPORTED_10000baseT_Full |
8125 SUPPORTED_TP |
8126 SUPPORTED_FIBRE |
8127 SUPPORTED_Autoneg |
8128 SUPPORTED_Pause |
8129 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8130 break;
8131
589abe3a
EG
8132 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8133 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8134 ext_phy_type);
f1410647 8135
34f80b04 8136 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8137 SUPPORTED_1000baseT_Full |
34f80b04 8138 SUPPORTED_FIBRE |
589abe3a 8139 SUPPORTED_Autoneg |
34f80b04
EG
8140 SUPPORTED_Pause |
8141 SUPPORTED_Asym_Pause);
f1410647
ET
8142 break;
8143
589abe3a
EG
8144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8145 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
8146 ext_phy_type);
8147
34f80b04 8148 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8149 SUPPORTED_2500baseX_Full |
34f80b04 8150 SUPPORTED_1000baseT_Full |
589abe3a
EG
8151 SUPPORTED_FIBRE |
8152 SUPPORTED_Autoneg |
8153 SUPPORTED_Pause |
8154 SUPPORTED_Asym_Pause);
8155 break;
8156
8157 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8158 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8159 ext_phy_type);
8160
8161 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
8162 SUPPORTED_FIBRE |
8163 SUPPORTED_Pause |
8164 SUPPORTED_Asym_Pause);
f1410647
ET
8165 break;
8166
589abe3a
EG
8167 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8168 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
8169 ext_phy_type);
8170
34f80b04
EG
8171 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8172 SUPPORTED_1000baseT_Full |
8173 SUPPORTED_FIBRE |
34f80b04
EG
8174 SUPPORTED_Pause |
8175 SUPPORTED_Asym_Pause);
f1410647
ET
8176 break;
8177
589abe3a
EG
8178 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8179 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
8180 ext_phy_type);
8181
34f80b04 8182 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8183 SUPPORTED_1000baseT_Full |
34f80b04 8184 SUPPORTED_Autoneg |
589abe3a 8185 SUPPORTED_FIBRE |
34f80b04
EG
8186 SUPPORTED_Pause |
8187 SUPPORTED_Asym_Pause);
c18487ee
YR
8188 break;
8189
4d295db0
EG
8190 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8191 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8192 ext_phy_type);
8193
8194 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8195 SUPPORTED_1000baseT_Full |
8196 SUPPORTED_Autoneg |
8197 SUPPORTED_FIBRE |
8198 SUPPORTED_Pause |
8199 SUPPORTED_Asym_Pause);
8200 break;
8201
f1410647
ET
8202 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8203 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8204 ext_phy_type);
8205
34f80b04
EG
8206 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8207 SUPPORTED_TP |
8208 SUPPORTED_Autoneg |
8209 SUPPORTED_Pause |
8210 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
8211 break;
8212
28577185
EG
8213 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8214 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8215 ext_phy_type);
8216
8217 bp->port.supported |= (SUPPORTED_10baseT_Half |
8218 SUPPORTED_10baseT_Full |
8219 SUPPORTED_100baseT_Half |
8220 SUPPORTED_100baseT_Full |
8221 SUPPORTED_1000baseT_Full |
8222 SUPPORTED_10000baseT_Full |
8223 SUPPORTED_TP |
8224 SUPPORTED_Autoneg |
8225 SUPPORTED_Pause |
8226 SUPPORTED_Asym_Pause);
8227 break;
8228
c18487ee
YR
8229 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8230 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8231 bp->link_params.ext_phy_config);
8232 break;
8233
a2fbb9ea
ET
8234 default:
8235 BNX2X_ERR("NVRAM config error. "
8236 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8237 bp->link_params.ext_phy_config);
a2fbb9ea
ET
8238 return;
8239 }
8240
34f80b04
EG
8241 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8242 port*0x18);
8243 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8244
a2fbb9ea
ET
8245 break;
8246
8247 default:
8248 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8249 bp->port.link_config);
a2fbb9ea
ET
8250 return;
8251 }
34f80b04 8252 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
8253
8254 /* mask what we support according to speed_cap_mask */
c18487ee
YR
8255 if (!(bp->link_params.speed_cap_mask &
8256 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8257 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8258
c18487ee
YR
8259 if (!(bp->link_params.speed_cap_mask &
8260 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8261 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8262
c18487ee
YR
8263 if (!(bp->link_params.speed_cap_mask &
8264 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8265 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8266
c18487ee
YR
8267 if (!(bp->link_params.speed_cap_mask &
8268 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8269 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8270
c18487ee
YR
8271 if (!(bp->link_params.speed_cap_mask &
8272 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
8273 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8274 SUPPORTED_1000baseT_Full);
a2fbb9ea 8275
c18487ee
YR
8276 if (!(bp->link_params.speed_cap_mask &
8277 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8278 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8279
c18487ee
YR
8280 if (!(bp->link_params.speed_cap_mask &
8281 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8282 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8283
34f80b04 8284 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
8285}
8286
34f80b04 8287static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8288{
c18487ee 8289 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8290
34f80b04 8291 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8292 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8293 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8294 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8295 bp->port.advertising = bp->port.supported;
a2fbb9ea 8296 } else {
c18487ee
YR
8297 u32 ext_phy_type =
8298 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8299
8300 if ((ext_phy_type ==
8301 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8302 (ext_phy_type ==
8303 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8304 /* force 10G, no AN */
c18487ee 8305 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8306 bp->port.advertising =
a2fbb9ea
ET
8307 (ADVERTISED_10000baseT_Full |
8308 ADVERTISED_FIBRE);
8309 break;
8310 }
8311 BNX2X_ERR("NVRAM config error. "
8312 "Invalid link_config 0x%x"
8313 " Autoneg not supported\n",
34f80b04 8314 bp->port.link_config);
a2fbb9ea
ET
8315 return;
8316 }
8317 break;
8318
8319 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8320 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8321 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8322 bp->port.advertising = (ADVERTISED_10baseT_Full |
8323 ADVERTISED_TP);
a2fbb9ea
ET
8324 } else {
8325 BNX2X_ERR("NVRAM config error. "
8326 "Invalid link_config 0x%x"
8327 " speed_cap_mask 0x%x\n",
34f80b04 8328 bp->port.link_config,
c18487ee 8329 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8330 return;
8331 }
8332 break;
8333
8334 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8335 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8336 bp->link_params.req_line_speed = SPEED_10;
8337 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8338 bp->port.advertising = (ADVERTISED_10baseT_Half |
8339 ADVERTISED_TP);
a2fbb9ea
ET
8340 } else {
8341 BNX2X_ERR("NVRAM config error. "
8342 "Invalid link_config 0x%x"
8343 " speed_cap_mask 0x%x\n",
34f80b04 8344 bp->port.link_config,
c18487ee 8345 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8346 return;
8347 }
8348 break;
8349
8350 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8351 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8352 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8353 bp->port.advertising = (ADVERTISED_100baseT_Full |
8354 ADVERTISED_TP);
a2fbb9ea
ET
8355 } else {
8356 BNX2X_ERR("NVRAM config error. "
8357 "Invalid link_config 0x%x"
8358 " speed_cap_mask 0x%x\n",
34f80b04 8359 bp->port.link_config,
c18487ee 8360 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8361 return;
8362 }
8363 break;
8364
8365 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8366 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8367 bp->link_params.req_line_speed = SPEED_100;
8368 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8369 bp->port.advertising = (ADVERTISED_100baseT_Half |
8370 ADVERTISED_TP);
a2fbb9ea
ET
8371 } else {
8372 BNX2X_ERR("NVRAM config error. "
8373 "Invalid link_config 0x%x"
8374 " speed_cap_mask 0x%x\n",
34f80b04 8375 bp->port.link_config,
c18487ee 8376 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8377 return;
8378 }
8379 break;
8380
8381 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8382 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8383 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8384 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8385 ADVERTISED_TP);
a2fbb9ea
ET
8386 } else {
8387 BNX2X_ERR("NVRAM config error. "
8388 "Invalid link_config 0x%x"
8389 " speed_cap_mask 0x%x\n",
34f80b04 8390 bp->port.link_config,
c18487ee 8391 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8392 return;
8393 }
8394 break;
8395
8396 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8397 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8398 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8399 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8400 ADVERTISED_TP);
a2fbb9ea
ET
8401 } else {
8402 BNX2X_ERR("NVRAM config error. "
8403 "Invalid link_config 0x%x"
8404 " speed_cap_mask 0x%x\n",
34f80b04 8405 bp->port.link_config,
c18487ee 8406 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8407 return;
8408 }
8409 break;
8410
8411 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8412 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8413 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8414 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8415 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8416 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8417 ADVERTISED_FIBRE);
a2fbb9ea
ET
8418 } else {
8419 BNX2X_ERR("NVRAM config error. "
8420 "Invalid link_config 0x%x"
8421 " speed_cap_mask 0x%x\n",
34f80b04 8422 bp->port.link_config,
c18487ee 8423 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8424 return;
8425 }
8426 break;
8427
8428 default:
8429 BNX2X_ERR("NVRAM config error. "
8430 "BAD link speed link_config 0x%x\n",
34f80b04 8431 bp->port.link_config);
c18487ee 8432 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8433 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8434 break;
8435 }
a2fbb9ea 8436
34f80b04
EG
8437 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8438 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8439 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8440 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8441 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8442
c18487ee 8443 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8444 " advertising 0x%x\n",
c18487ee
YR
8445 bp->link_params.req_line_speed,
8446 bp->link_params.req_duplex,
34f80b04 8447 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8448}
8449
34f80b04 8450static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8451{
34f80b04
EG
8452 int port = BP_PORT(bp);
8453 u32 val, val2;
589abe3a 8454 u32 config;
c2c8b03e 8455 u16 i;
01cd4528 8456 u32 ext_phy_type;
a2fbb9ea 8457
c18487ee 8458 bp->link_params.bp = bp;
34f80b04 8459 bp->link_params.port = port;
c18487ee 8460
c18487ee 8461 bp->link_params.lane_config =
a2fbb9ea 8462 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8463 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8464 SHMEM_RD(bp,
8465 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8466 /* BCM8727_NOC => BCM8727 no over current */
8467 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8468 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8469 bp->link_params.ext_phy_config &=
8470 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8471 bp->link_params.ext_phy_config |=
8472 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8473 bp->link_params.feature_config_flags |=
8474 FEATURE_CONFIG_BCM8727_NOC;
8475 }
8476
c18487ee 8477 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8478 SHMEM_RD(bp,
8479 dev_info.port_hw_config[port].speed_capability_mask);
8480
34f80b04 8481 bp->port.link_config =
a2fbb9ea
ET
8482 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8483
c2c8b03e
EG
8484 /* Get the 4 lanes xgxs config rx and tx */
8485 for (i = 0; i < 2; i++) {
8486 val = SHMEM_RD(bp,
8487 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8488 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8489 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8490
8491 val = SHMEM_RD(bp,
8492 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8493 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8494 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8495 }
8496
3ce2c3f9
EG
8497 /* If the device is capable of WoL, set the default state according
8498 * to the HW
8499 */
4d295db0 8500 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8501 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8502 (config & PORT_FEATURE_WOL_ENABLED));
8503
c2c8b03e
EG
8504 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8505 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8506 bp->link_params.lane_config,
8507 bp->link_params.ext_phy_config,
34f80b04 8508 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8509
4d295db0
EG
8510 bp->link_params.switch_cfg |= (bp->port.link_config &
8511 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8512 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8513
8514 bnx2x_link_settings_requested(bp);
8515
01cd4528
EG
8516 /*
8517 * If connected directly, work with the internal PHY, otherwise, work
8518 * with the external PHY
8519 */
8520 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8521 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8522 bp->mdio.prtad = bp->link_params.phy_addr;
8523
8524 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8525 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8526 bp->mdio.prtad =
8527 (bp->link_params.ext_phy_config &
8528 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8529 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8530
a2fbb9ea
ET
8531 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8532 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8533 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8534 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8535 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8536 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8537 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8538 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8539 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8540 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8541}
8542
8543static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8544{
8545 int func = BP_FUNC(bp);
8546 u32 val, val2;
8547 int rc = 0;
a2fbb9ea 8548
34f80b04 8549 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8550
34f80b04
EG
8551 bp->e1hov = 0;
8552 bp->e1hmf = 0;
8553 if (CHIP_IS_E1H(bp)) {
8554 bp->mf_config =
8555 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8556
2691d51d 8557 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8558 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8559 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8560 bp->e1hmf = 1;
2691d51d
EG
8561 BNX2X_DEV_INFO("%s function mode\n",
8562 IS_E1HMF(bp) ? "multi" : "single");
8563
8564 if (IS_E1HMF(bp)) {
8565 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8566 e1hov_tag) &
8567 FUNC_MF_CFG_E1HOV_TAG_MASK);
8568 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8569 bp->e1hov = val;
8570 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8571 "(0x%04x)\n",
8572 func, bp->e1hov, bp->e1hov);
8573 } else {
34f80b04
EG
8574 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8575 " aborting\n", func);
8576 rc = -EPERM;
8577 }
2691d51d
EG
8578 } else {
8579 if (BP_E1HVN(bp)) {
8580 BNX2X_ERR("!!! VN %d in single function mode,"
8581 " aborting\n", BP_E1HVN(bp));
8582 rc = -EPERM;
8583 }
34f80b04
EG
8584 }
8585 }
a2fbb9ea 8586
34f80b04
EG
8587 if (!BP_NOMCP(bp)) {
8588 bnx2x_get_port_hwinfo(bp);
8589
8590 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8591 DRV_MSG_SEQ_NUMBER_MASK);
8592 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8593 }
8594
8595 if (IS_E1HMF(bp)) {
8596 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8597 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8598 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8599 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8600 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8601 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8602 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8603 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8604 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8605 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8606 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8607 ETH_ALEN);
8608 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8609 ETH_ALEN);
a2fbb9ea 8610 }
34f80b04
EG
8611
8612 return rc;
a2fbb9ea
ET
8613 }
8614
34f80b04
EG
8615 if (BP_NOMCP(bp)) {
8616 /* only supposed to happen on emulation/FPGA */
33471629 8617 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8618 random_ether_addr(bp->dev->dev_addr);
8619 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8620 }
a2fbb9ea 8621
34f80b04
EG
8622 return rc;
8623}
8624
8625static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8626{
8627 int func = BP_FUNC(bp);
87942b46 8628 int timer_interval;
34f80b04
EG
8629 int rc;
8630
da5a662a
VZ
8631 /* Disable interrupt handling until HW is initialized */
8632 atomic_set(&bp->intr_sem, 1);
e1510706 8633 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8634
34f80b04 8635 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8636
1cf167f2 8637 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8638 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8639
8640 rc = bnx2x_get_hwinfo(bp);
8641
8642 /* need to reset chip if undi was active */
8643 if (!BP_NOMCP(bp))
8644 bnx2x_undi_unload(bp);
8645
8646 if (CHIP_REV_IS_FPGA(bp))
8647 printk(KERN_ERR PFX "FPGA detected\n");
8648
8649 if (BP_NOMCP(bp) && (func == 0))
8650 printk(KERN_ERR PFX
8651 "MCP disabled, must load devices in order!\n");
8652
555f6c78 8653 /* Set multi queue mode */
8badd27a
EG
8654 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8655 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8656 printk(KERN_ERR PFX
8badd27a 8657 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8658 multi_mode = ETH_RSS_MODE_DISABLED;
8659 }
8660 bp->multi_mode = multi_mode;
8661
8662
7a9b2557
VZ
8663 /* Set TPA flags */
8664 if (disable_tpa) {
8665 bp->flags &= ~TPA_ENABLE_FLAG;
8666 bp->dev->features &= ~NETIF_F_LRO;
8667 } else {
8668 bp->flags |= TPA_ENABLE_FLAG;
8669 bp->dev->features |= NETIF_F_LRO;
8670 }
8671
8d5726c4 8672 bp->mrrs = mrrs;
7a9b2557 8673
34f80b04
EG
8674 bp->tx_ring_size = MAX_TX_AVAIL;
8675 bp->rx_ring_size = MAX_RX_AVAIL;
8676
8677 bp->rx_csum = 1;
34f80b04
EG
8678
8679 bp->tx_ticks = 50;
8680 bp->rx_ticks = 25;
8681
87942b46
EG
8682 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8683 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8684
8685 init_timer(&bp->timer);
8686 bp->timer.expires = jiffies + bp->current_interval;
8687 bp->timer.data = (unsigned long) bp;
8688 bp->timer.function = bnx2x_timer;
8689
8690 return rc;
a2fbb9ea
ET
8691}
8692
8693/*
8694 * ethtool service functions
8695 */
8696
8697/* All ethtool functions called with rtnl_lock */
8698
8699static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8700{
8701 struct bnx2x *bp = netdev_priv(dev);
8702
34f80b04
EG
8703 cmd->supported = bp->port.supported;
8704 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8705
8706 if (netif_carrier_ok(dev)) {
c18487ee
YR
8707 cmd->speed = bp->link_vars.line_speed;
8708 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8709 } else {
c18487ee
YR
8710 cmd->speed = bp->link_params.req_line_speed;
8711 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8712 }
34f80b04
EG
8713 if (IS_E1HMF(bp)) {
8714 u16 vn_max_rate;
8715
8716 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8717 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8718 if (vn_max_rate < cmd->speed)
8719 cmd->speed = vn_max_rate;
8720 }
a2fbb9ea 8721
c18487ee
YR
8722 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8723 u32 ext_phy_type =
8724 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8725
8726 switch (ext_phy_type) {
8727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8728 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8729 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8730 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8731 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8732 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 8733 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
8734 cmd->port = PORT_FIBRE;
8735 break;
8736
8737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8739 cmd->port = PORT_TP;
8740 break;
8741
c18487ee
YR
8742 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8743 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8744 bp->link_params.ext_phy_config);
8745 break;
8746
f1410647
ET
8747 default:
8748 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8749 bp->link_params.ext_phy_config);
8750 break;
f1410647
ET
8751 }
8752 } else
a2fbb9ea 8753 cmd->port = PORT_TP;
a2fbb9ea 8754
01cd4528 8755 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
8756 cmd->transceiver = XCVR_INTERNAL;
8757
c18487ee 8758 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8759 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8760 else
a2fbb9ea 8761 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8762
8763 cmd->maxtxpkt = 0;
8764 cmd->maxrxpkt = 0;
8765
8766 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8767 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8768 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8769 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8770 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8771 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8772 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8773
8774 return 0;
8775}
8776
8777static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8778{
8779 struct bnx2x *bp = netdev_priv(dev);
8780 u32 advertising;
8781
34f80b04
EG
8782 if (IS_E1HMF(bp))
8783 return 0;
8784
a2fbb9ea
ET
8785 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8786 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8787 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8788 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8789 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8790 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8791 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8792
a2fbb9ea 8793 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8794 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8795 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8796 return -EINVAL;
f1410647 8797 }
a2fbb9ea
ET
8798
8799 /* advertise the requested speed and duplex if supported */
34f80b04 8800 cmd->advertising &= bp->port.supported;
a2fbb9ea 8801
c18487ee
YR
8802 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8803 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8804 bp->port.advertising |= (ADVERTISED_Autoneg |
8805 cmd->advertising);
a2fbb9ea
ET
8806
8807 } else { /* forced speed */
8808 /* advertise the requested speed and duplex if supported */
8809 switch (cmd->speed) {
8810 case SPEED_10:
8811 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8812 if (!(bp->port.supported &
f1410647
ET
8813 SUPPORTED_10baseT_Full)) {
8814 DP(NETIF_MSG_LINK,
8815 "10M full not supported\n");
a2fbb9ea 8816 return -EINVAL;
f1410647 8817 }
a2fbb9ea
ET
8818
8819 advertising = (ADVERTISED_10baseT_Full |
8820 ADVERTISED_TP);
8821 } else {
34f80b04 8822 if (!(bp->port.supported &
f1410647
ET
8823 SUPPORTED_10baseT_Half)) {
8824 DP(NETIF_MSG_LINK,
8825 "10M half not supported\n");
a2fbb9ea 8826 return -EINVAL;
f1410647 8827 }
a2fbb9ea
ET
8828
8829 advertising = (ADVERTISED_10baseT_Half |
8830 ADVERTISED_TP);
8831 }
8832 break;
8833
8834 case SPEED_100:
8835 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8836 if (!(bp->port.supported &
f1410647
ET
8837 SUPPORTED_100baseT_Full)) {
8838 DP(NETIF_MSG_LINK,
8839 "100M full not supported\n");
a2fbb9ea 8840 return -EINVAL;
f1410647 8841 }
a2fbb9ea
ET
8842
8843 advertising = (ADVERTISED_100baseT_Full |
8844 ADVERTISED_TP);
8845 } else {
34f80b04 8846 if (!(bp->port.supported &
f1410647
ET
8847 SUPPORTED_100baseT_Half)) {
8848 DP(NETIF_MSG_LINK,
8849 "100M half not supported\n");
a2fbb9ea 8850 return -EINVAL;
f1410647 8851 }
a2fbb9ea
ET
8852
8853 advertising = (ADVERTISED_100baseT_Half |
8854 ADVERTISED_TP);
8855 }
8856 break;
8857
8858 case SPEED_1000:
f1410647
ET
8859 if (cmd->duplex != DUPLEX_FULL) {
8860 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8861 return -EINVAL;
f1410647 8862 }
a2fbb9ea 8863
34f80b04 8864 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8865 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8866 return -EINVAL;
f1410647 8867 }
a2fbb9ea
ET
8868
8869 advertising = (ADVERTISED_1000baseT_Full |
8870 ADVERTISED_TP);
8871 break;
8872
8873 case SPEED_2500:
f1410647
ET
8874 if (cmd->duplex != DUPLEX_FULL) {
8875 DP(NETIF_MSG_LINK,
8876 "2.5G half not supported\n");
a2fbb9ea 8877 return -EINVAL;
f1410647 8878 }
a2fbb9ea 8879
34f80b04 8880 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8881 DP(NETIF_MSG_LINK,
8882 "2.5G full not supported\n");
a2fbb9ea 8883 return -EINVAL;
f1410647 8884 }
a2fbb9ea 8885
f1410647 8886 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8887 ADVERTISED_TP);
8888 break;
8889
8890 case SPEED_10000:
f1410647
ET
8891 if (cmd->duplex != DUPLEX_FULL) {
8892 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8893 return -EINVAL;
f1410647 8894 }
a2fbb9ea 8895
34f80b04 8896 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8897 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8898 return -EINVAL;
f1410647 8899 }
a2fbb9ea
ET
8900
8901 advertising = (ADVERTISED_10000baseT_Full |
8902 ADVERTISED_FIBRE);
8903 break;
8904
8905 default:
f1410647 8906 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8907 return -EINVAL;
8908 }
8909
c18487ee
YR
8910 bp->link_params.req_line_speed = cmd->speed;
8911 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8912 bp->port.advertising = advertising;
a2fbb9ea
ET
8913 }
8914
c18487ee 8915 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8916 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8917 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8918 bp->port.advertising);
a2fbb9ea 8919
34f80b04 8920 if (netif_running(dev)) {
bb2a0f7a 8921 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8922 bnx2x_link_set(bp);
8923 }
a2fbb9ea
ET
8924
8925 return 0;
8926}
8927
c18487ee
YR
8928#define PHY_FW_VER_LEN 10
8929
a2fbb9ea
ET
8930static void bnx2x_get_drvinfo(struct net_device *dev,
8931 struct ethtool_drvinfo *info)
8932{
8933 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8934 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8935
8936 strcpy(info->driver, DRV_MODULE_NAME);
8937 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8938
8939 phy_fw_ver[0] = '\0';
34f80b04 8940 if (bp->port.pmf) {
4a37fb66 8941 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8942 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8943 (bp->state != BNX2X_STATE_CLOSED),
8944 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8945 bnx2x_release_phy_lock(bp);
34f80b04 8946 }
c18487ee 8947
f0e53a84
EG
8948 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8949 (bp->common.bc_ver & 0xff0000) >> 16,
8950 (bp->common.bc_ver & 0xff00) >> 8,
8951 (bp->common.bc_ver & 0xff),
8952 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8953 strcpy(info->bus_info, pci_name(bp->pdev));
8954 info->n_stats = BNX2X_NUM_STATS;
8955 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8956 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8957 info->regdump_len = 0;
8958}
8959
0a64ea57
EG
8960#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8961#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8962
8963static int bnx2x_get_regs_len(struct net_device *dev)
8964{
8965 static u32 regdump_len;
8966 struct bnx2x *bp = netdev_priv(dev);
8967 int i;
8968
8969 if (regdump_len)
8970 return regdump_len;
8971
8972 if (CHIP_IS_E1(bp)) {
8973 for (i = 0; i < REGS_COUNT; i++)
8974 if (IS_E1_ONLINE(reg_addrs[i].info))
8975 regdump_len += reg_addrs[i].size;
8976
8977 for (i = 0; i < WREGS_COUNT_E1; i++)
8978 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8979 regdump_len += wreg_addrs_e1[i].size *
8980 (1 + wreg_addrs_e1[i].read_regs_count);
8981
8982 } else { /* E1H */
8983 for (i = 0; i < REGS_COUNT; i++)
8984 if (IS_E1H_ONLINE(reg_addrs[i].info))
8985 regdump_len += reg_addrs[i].size;
8986
8987 for (i = 0; i < WREGS_COUNT_E1H; i++)
8988 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8989 regdump_len += wreg_addrs_e1h[i].size *
8990 (1 + wreg_addrs_e1h[i].read_regs_count);
8991 }
8992 regdump_len *= 4;
8993 regdump_len += sizeof(struct dump_hdr);
8994
8995 return regdump_len;
8996}
8997
8998static void bnx2x_get_regs(struct net_device *dev,
8999 struct ethtool_regs *regs, void *_p)
9000{
9001 u32 *p = _p, i, j;
9002 struct bnx2x *bp = netdev_priv(dev);
9003 struct dump_hdr dump_hdr = {0};
9004
9005 regs->version = 0;
9006 memset(p, 0, regs->len);
9007
9008 if (!netif_running(bp->dev))
9009 return;
9010
9011 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9012 dump_hdr.dump_sign = dump_sign_all;
9013 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9014 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9015 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9016 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9017 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9018
9019 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9020 p += dump_hdr.hdr_size + 1;
9021
9022 if (CHIP_IS_E1(bp)) {
9023 for (i = 0; i < REGS_COUNT; i++)
9024 if (IS_E1_ONLINE(reg_addrs[i].info))
9025 for (j = 0; j < reg_addrs[i].size; j++)
9026 *p++ = REG_RD(bp,
9027 reg_addrs[i].addr + j*4);
9028
9029 } else { /* E1H */
9030 for (i = 0; i < REGS_COUNT; i++)
9031 if (IS_E1H_ONLINE(reg_addrs[i].info))
9032 for (j = 0; j < reg_addrs[i].size; j++)
9033 *p++ = REG_RD(bp,
9034 reg_addrs[i].addr + j*4);
9035 }
9036}
9037
a2fbb9ea
ET
9038static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9039{
9040 struct bnx2x *bp = netdev_priv(dev);
9041
9042 if (bp->flags & NO_WOL_FLAG) {
9043 wol->supported = 0;
9044 wol->wolopts = 0;
9045 } else {
9046 wol->supported = WAKE_MAGIC;
9047 if (bp->wol)
9048 wol->wolopts = WAKE_MAGIC;
9049 else
9050 wol->wolopts = 0;
9051 }
9052 memset(&wol->sopass, 0, sizeof(wol->sopass));
9053}
9054
9055static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9056{
9057 struct bnx2x *bp = netdev_priv(dev);
9058
9059 if (wol->wolopts & ~WAKE_MAGIC)
9060 return -EINVAL;
9061
9062 if (wol->wolopts & WAKE_MAGIC) {
9063 if (bp->flags & NO_WOL_FLAG)
9064 return -EINVAL;
9065
9066 bp->wol = 1;
34f80b04 9067 } else
a2fbb9ea 9068 bp->wol = 0;
34f80b04 9069
a2fbb9ea
ET
9070 return 0;
9071}
9072
9073static u32 bnx2x_get_msglevel(struct net_device *dev)
9074{
9075 struct bnx2x *bp = netdev_priv(dev);
9076
9077 return bp->msglevel;
9078}
9079
9080static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9081{
9082 struct bnx2x *bp = netdev_priv(dev);
9083
9084 if (capable(CAP_NET_ADMIN))
9085 bp->msglevel = level;
9086}
9087
9088static int bnx2x_nway_reset(struct net_device *dev)
9089{
9090 struct bnx2x *bp = netdev_priv(dev);
9091
34f80b04
EG
9092 if (!bp->port.pmf)
9093 return 0;
a2fbb9ea 9094
34f80b04 9095 if (netif_running(dev)) {
bb2a0f7a 9096 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9097 bnx2x_link_set(bp);
9098 }
a2fbb9ea
ET
9099
9100 return 0;
9101}
9102
01e53298
NO
9103static u32
9104bnx2x_get_link(struct net_device *dev)
9105{
9106 struct bnx2x *bp = netdev_priv(dev);
9107
9108 return bp->link_vars.link_up;
9109}
9110
a2fbb9ea
ET
9111static int bnx2x_get_eeprom_len(struct net_device *dev)
9112{
9113 struct bnx2x *bp = netdev_priv(dev);
9114
34f80b04 9115 return bp->common.flash_size;
a2fbb9ea
ET
9116}
9117
9118static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9119{
34f80b04 9120 int port = BP_PORT(bp);
a2fbb9ea
ET
9121 int count, i;
9122 u32 val = 0;
9123
9124 /* adjust timeout for emulation/FPGA */
9125 count = NVRAM_TIMEOUT_COUNT;
9126 if (CHIP_REV_IS_SLOW(bp))
9127 count *= 100;
9128
9129 /* request access to nvram interface */
9130 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9131 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9132
9133 for (i = 0; i < count*10; i++) {
9134 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9135 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9136 break;
9137
9138 udelay(5);
9139 }
9140
9141 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9142 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9143 return -EBUSY;
9144 }
9145
9146 return 0;
9147}
9148
9149static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9150{
34f80b04 9151 int port = BP_PORT(bp);
a2fbb9ea
ET
9152 int count, i;
9153 u32 val = 0;
9154
9155 /* adjust timeout for emulation/FPGA */
9156 count = NVRAM_TIMEOUT_COUNT;
9157 if (CHIP_REV_IS_SLOW(bp))
9158 count *= 100;
9159
9160 /* relinquish nvram interface */
9161 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9162 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9163
9164 for (i = 0; i < count*10; i++) {
9165 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9166 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9167 break;
9168
9169 udelay(5);
9170 }
9171
9172 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9173 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9174 return -EBUSY;
9175 }
9176
9177 return 0;
9178}
9179
9180static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9181{
9182 u32 val;
9183
9184 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9185
9186 /* enable both bits, even on read */
9187 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9188 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9189 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9190}
9191
9192static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9193{
9194 u32 val;
9195
9196 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9197
9198 /* disable both bits, even after read */
9199 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9200 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9201 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9202}
9203
4781bfad 9204static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9205 u32 cmd_flags)
9206{
f1410647 9207 int count, i, rc;
a2fbb9ea
ET
9208 u32 val;
9209
9210 /* build the command word */
9211 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9212
9213 /* need to clear DONE bit separately */
9214 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9215
9216 /* address of the NVRAM to read from */
9217 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9218 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9219
9220 /* issue a read command */
9221 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9222
9223 /* adjust timeout for emulation/FPGA */
9224 count = NVRAM_TIMEOUT_COUNT;
9225 if (CHIP_REV_IS_SLOW(bp))
9226 count *= 100;
9227
9228 /* wait for completion */
9229 *ret_val = 0;
9230 rc = -EBUSY;
9231 for (i = 0; i < count; i++) {
9232 udelay(5);
9233 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9234
9235 if (val & MCPR_NVM_COMMAND_DONE) {
9236 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
9237 /* we read nvram data in cpu order
9238 * but ethtool sees it as an array of bytes
9239 * converting to big-endian will do the work */
4781bfad 9240 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9241 rc = 0;
9242 break;
9243 }
9244 }
9245
9246 return rc;
9247}
9248
9249static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9250 int buf_size)
9251{
9252 int rc;
9253 u32 cmd_flags;
4781bfad 9254 __be32 val;
a2fbb9ea
ET
9255
9256 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9257 DP(BNX2X_MSG_NVM,
c14423fe 9258 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9259 offset, buf_size);
9260 return -EINVAL;
9261 }
9262
34f80b04
EG
9263 if (offset + buf_size > bp->common.flash_size) {
9264 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9265 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9266 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9267 return -EINVAL;
9268 }
9269
9270 /* request access to nvram interface */
9271 rc = bnx2x_acquire_nvram_lock(bp);
9272 if (rc)
9273 return rc;
9274
9275 /* enable access to nvram interface */
9276 bnx2x_enable_nvram_access(bp);
9277
9278 /* read the first word(s) */
9279 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9280 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9281 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9282 memcpy(ret_buf, &val, 4);
9283
9284 /* advance to the next dword */
9285 offset += sizeof(u32);
9286 ret_buf += sizeof(u32);
9287 buf_size -= sizeof(u32);
9288 cmd_flags = 0;
9289 }
9290
9291 if (rc == 0) {
9292 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9293 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9294 memcpy(ret_buf, &val, 4);
9295 }
9296
9297 /* disable access to nvram interface */
9298 bnx2x_disable_nvram_access(bp);
9299 bnx2x_release_nvram_lock(bp);
9300
9301 return rc;
9302}
9303
9304static int bnx2x_get_eeprom(struct net_device *dev,
9305 struct ethtool_eeprom *eeprom, u8 *eebuf)
9306{
9307 struct bnx2x *bp = netdev_priv(dev);
9308 int rc;
9309
2add3acb
EG
9310 if (!netif_running(dev))
9311 return -EAGAIN;
9312
34f80b04 9313 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9314 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9315 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9316 eeprom->len, eeprom->len);
9317
9318 /* parameters already validated in ethtool_get_eeprom */
9319
9320 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9321
9322 return rc;
9323}
9324
9325static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9326 u32 cmd_flags)
9327{
f1410647 9328 int count, i, rc;
a2fbb9ea
ET
9329
9330 /* build the command word */
9331 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9332
9333 /* need to clear DONE bit separately */
9334 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9335
9336 /* write the data */
9337 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9338
9339 /* address of the NVRAM to write to */
9340 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9341 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9342
9343 /* issue the write command */
9344 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9345
9346 /* adjust timeout for emulation/FPGA */
9347 count = NVRAM_TIMEOUT_COUNT;
9348 if (CHIP_REV_IS_SLOW(bp))
9349 count *= 100;
9350
9351 /* wait for completion */
9352 rc = -EBUSY;
9353 for (i = 0; i < count; i++) {
9354 udelay(5);
9355 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9356 if (val & MCPR_NVM_COMMAND_DONE) {
9357 rc = 0;
9358 break;
9359 }
9360 }
9361
9362 return rc;
9363}
9364
f1410647 9365#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9366
9367static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9368 int buf_size)
9369{
9370 int rc;
9371 u32 cmd_flags;
9372 u32 align_offset;
4781bfad 9373 __be32 val;
a2fbb9ea 9374
34f80b04
EG
9375 if (offset + buf_size > bp->common.flash_size) {
9376 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9377 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9378 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9379 return -EINVAL;
9380 }
9381
9382 /* request access to nvram interface */
9383 rc = bnx2x_acquire_nvram_lock(bp);
9384 if (rc)
9385 return rc;
9386
9387 /* enable access to nvram interface */
9388 bnx2x_enable_nvram_access(bp);
9389
9390 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9391 align_offset = (offset & ~0x03);
9392 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9393
9394 if (rc == 0) {
9395 val &= ~(0xff << BYTE_OFFSET(offset));
9396 val |= (*data_buf << BYTE_OFFSET(offset));
9397
9398 /* nvram data is returned as an array of bytes
9399 * convert it back to cpu order */
9400 val = be32_to_cpu(val);
9401
a2fbb9ea
ET
9402 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9403 cmd_flags);
9404 }
9405
9406 /* disable access to nvram interface */
9407 bnx2x_disable_nvram_access(bp);
9408 bnx2x_release_nvram_lock(bp);
9409
9410 return rc;
9411}
9412
9413static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9414 int buf_size)
9415{
9416 int rc;
9417 u32 cmd_flags;
9418 u32 val;
9419 u32 written_so_far;
9420
34f80b04 9421 if (buf_size == 1) /* ethtool */
a2fbb9ea 9422 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9423
9424 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9425 DP(BNX2X_MSG_NVM,
c14423fe 9426 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9427 offset, buf_size);
9428 return -EINVAL;
9429 }
9430
34f80b04
EG
9431 if (offset + buf_size > bp->common.flash_size) {
9432 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9433 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9434 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9435 return -EINVAL;
9436 }
9437
9438 /* request access to nvram interface */
9439 rc = bnx2x_acquire_nvram_lock(bp);
9440 if (rc)
9441 return rc;
9442
9443 /* enable access to nvram interface */
9444 bnx2x_enable_nvram_access(bp);
9445
9446 written_so_far = 0;
9447 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9448 while ((written_so_far < buf_size) && (rc == 0)) {
9449 if (written_so_far == (buf_size - sizeof(u32)))
9450 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9451 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9452 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9453 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9454 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9455
9456 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9457
9458 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9459
9460 /* advance to the next dword */
9461 offset += sizeof(u32);
9462 data_buf += sizeof(u32);
9463 written_so_far += sizeof(u32);
9464 cmd_flags = 0;
9465 }
9466
9467 /* disable access to nvram interface */
9468 bnx2x_disable_nvram_access(bp);
9469 bnx2x_release_nvram_lock(bp);
9470
9471 return rc;
9472}
9473
9474static int bnx2x_set_eeprom(struct net_device *dev,
9475 struct ethtool_eeprom *eeprom, u8 *eebuf)
9476{
9477 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9478 int port = BP_PORT(bp);
9479 int rc = 0;
a2fbb9ea 9480
9f4c9583
EG
9481 if (!netif_running(dev))
9482 return -EAGAIN;
9483
34f80b04 9484 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9485 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9486 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9487 eeprom->len, eeprom->len);
9488
9489 /* parameters already validated in ethtool_set_eeprom */
9490
f57a6025
EG
9491 /* PHY eeprom can be accessed only by the PMF */
9492 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9493 !bp->port.pmf)
9494 return -EINVAL;
9495
9496 if (eeprom->magic == 0x50485950) {
9497 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9498 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9499
f57a6025
EG
9500 bnx2x_acquire_phy_lock(bp);
9501 rc |= bnx2x_link_reset(&bp->link_params,
9502 &bp->link_vars, 0);
9503 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9504 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9505 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9506 MISC_REGISTERS_GPIO_HIGH, port);
9507 bnx2x_release_phy_lock(bp);
9508 bnx2x_link_report(bp);
9509
9510 } else if (eeprom->magic == 0x50485952) {
9511 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9512 if ((bp->state == BNX2X_STATE_OPEN) ||
9513 (bp->state == BNX2X_STATE_DISABLED)) {
4a37fb66 9514 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9515 rc |= bnx2x_link_reset(&bp->link_params,
9516 &bp->link_vars, 1);
9517
9518 rc |= bnx2x_phy_init(&bp->link_params,
9519 &bp->link_vars);
4a37fb66 9520 bnx2x_release_phy_lock(bp);
f57a6025
EG
9521 bnx2x_calc_fc_adv(bp);
9522 }
9523 } else if (eeprom->magic == 0x53985943) {
9524 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9525 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9526 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9527 u8 ext_phy_addr =
9528 (bp->link_params.ext_phy_config &
9529 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
9530 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
9531
9532 /* DSP Remove Download Mode */
9533 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9534 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9535
f57a6025
EG
9536 bnx2x_acquire_phy_lock(bp);
9537
9538 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9539
9540 /* wait 0.5 sec to allow it to run */
9541 msleep(500);
9542 bnx2x_ext_phy_hw_reset(bp, port);
9543 msleep(500);
9544 bnx2x_release_phy_lock(bp);
9545 }
9546 } else
c18487ee 9547 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9548
9549 return rc;
9550}
9551
9552static int bnx2x_get_coalesce(struct net_device *dev,
9553 struct ethtool_coalesce *coal)
9554{
9555 struct bnx2x *bp = netdev_priv(dev);
9556
9557 memset(coal, 0, sizeof(struct ethtool_coalesce));
9558
9559 coal->rx_coalesce_usecs = bp->rx_ticks;
9560 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9561
9562 return 0;
9563}
9564
ca00392c 9565#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9566static int bnx2x_set_coalesce(struct net_device *dev,
9567 struct ethtool_coalesce *coal)
9568{
9569 struct bnx2x *bp = netdev_priv(dev);
9570
9571 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9572 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9573 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9574
9575 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9576 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9577 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9578
34f80b04 9579 if (netif_running(dev))
a2fbb9ea
ET
9580 bnx2x_update_coalesce(bp);
9581
9582 return 0;
9583}
9584
9585static void bnx2x_get_ringparam(struct net_device *dev,
9586 struct ethtool_ringparam *ering)
9587{
9588 struct bnx2x *bp = netdev_priv(dev);
9589
9590 ering->rx_max_pending = MAX_RX_AVAIL;
9591 ering->rx_mini_max_pending = 0;
9592 ering->rx_jumbo_max_pending = 0;
9593
9594 ering->rx_pending = bp->rx_ring_size;
9595 ering->rx_mini_pending = 0;
9596 ering->rx_jumbo_pending = 0;
9597
9598 ering->tx_max_pending = MAX_TX_AVAIL;
9599 ering->tx_pending = bp->tx_ring_size;
9600}
9601
9602static int bnx2x_set_ringparam(struct net_device *dev,
9603 struct ethtool_ringparam *ering)
9604{
9605 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9606 int rc = 0;
a2fbb9ea
ET
9607
9608 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9609 (ering->tx_pending > MAX_TX_AVAIL) ||
9610 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9611 return -EINVAL;
9612
9613 bp->rx_ring_size = ering->rx_pending;
9614 bp->tx_ring_size = ering->tx_pending;
9615
34f80b04
EG
9616 if (netif_running(dev)) {
9617 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9618 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9619 }
9620
34f80b04 9621 return rc;
a2fbb9ea
ET
9622}
9623
9624static void bnx2x_get_pauseparam(struct net_device *dev,
9625 struct ethtool_pauseparam *epause)
9626{
9627 struct bnx2x *bp = netdev_priv(dev);
9628
356e2385
EG
9629 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9630 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9631 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9632
c0700f90
DM
9633 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9634 BNX2X_FLOW_CTRL_RX);
9635 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9636 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9637
9638 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9639 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9640 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9641}
9642
9643static int bnx2x_set_pauseparam(struct net_device *dev,
9644 struct ethtool_pauseparam *epause)
9645{
9646 struct bnx2x *bp = netdev_priv(dev);
9647
34f80b04
EG
9648 if (IS_E1HMF(bp))
9649 return 0;
9650
a2fbb9ea
ET
9651 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9652 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9653 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9654
c0700f90 9655 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9656
f1410647 9657 if (epause->rx_pause)
c0700f90 9658 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9659
f1410647 9660 if (epause->tx_pause)
c0700f90 9661 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9662
c0700f90
DM
9663 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9664 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9665
c18487ee 9666 if (epause->autoneg) {
34f80b04 9667 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9668 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9669 return -EINVAL;
9670 }
a2fbb9ea 9671
c18487ee 9672 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9673 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9674 }
a2fbb9ea 9675
c18487ee
YR
9676 DP(NETIF_MSG_LINK,
9677 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9678
9679 if (netif_running(dev)) {
bb2a0f7a 9680 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9681 bnx2x_link_set(bp);
9682 }
a2fbb9ea
ET
9683
9684 return 0;
9685}
9686
df0f2343
VZ
9687static int bnx2x_set_flags(struct net_device *dev, u32 data)
9688{
9689 struct bnx2x *bp = netdev_priv(dev);
9690 int changed = 0;
9691 int rc = 0;
9692
9693 /* TPA requires Rx CSUM offloading */
9694 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9695 if (!(dev->features & NETIF_F_LRO)) {
9696 dev->features |= NETIF_F_LRO;
9697 bp->flags |= TPA_ENABLE_FLAG;
9698 changed = 1;
9699 }
9700
9701 } else if (dev->features & NETIF_F_LRO) {
9702 dev->features &= ~NETIF_F_LRO;
9703 bp->flags &= ~TPA_ENABLE_FLAG;
9704 changed = 1;
9705 }
9706
9707 if (changed && netif_running(dev)) {
9708 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9709 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9710 }
9711
9712 return rc;
9713}
9714
a2fbb9ea
ET
9715static u32 bnx2x_get_rx_csum(struct net_device *dev)
9716{
9717 struct bnx2x *bp = netdev_priv(dev);
9718
9719 return bp->rx_csum;
9720}
9721
9722static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9723{
9724 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9725 int rc = 0;
a2fbb9ea
ET
9726
9727 bp->rx_csum = data;
df0f2343
VZ
9728
9729 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9730 TPA'ed packets will be discarded due to wrong TCP CSUM */
9731 if (!data) {
9732 u32 flags = ethtool_op_get_flags(dev);
9733
9734 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9735 }
9736
9737 return rc;
a2fbb9ea
ET
9738}
9739
9740static int bnx2x_set_tso(struct net_device *dev, u32 data)
9741{
755735eb 9742 if (data) {
a2fbb9ea 9743 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9744 dev->features |= NETIF_F_TSO6;
9745 } else {
a2fbb9ea 9746 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9747 dev->features &= ~NETIF_F_TSO6;
9748 }
9749
a2fbb9ea
ET
9750 return 0;
9751}
9752
f3c87cdd 9753static const struct {
a2fbb9ea
ET
9754 char string[ETH_GSTRING_LEN];
9755} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9756 { "register_test (offline)" },
9757 { "memory_test (offline)" },
9758 { "loopback_test (offline)" },
9759 { "nvram_test (online)" },
9760 { "interrupt_test (online)" },
9761 { "link_test (online)" },
d3d4f495 9762 { "idle check (online)" }
a2fbb9ea
ET
9763};
9764
9765static int bnx2x_self_test_count(struct net_device *dev)
9766{
9767 return BNX2X_NUM_TESTS;
9768}
9769
f3c87cdd
YG
9770static int bnx2x_test_registers(struct bnx2x *bp)
9771{
9772 int idx, i, rc = -ENODEV;
9773 u32 wr_val = 0;
9dabc424 9774 int port = BP_PORT(bp);
f3c87cdd
YG
9775 static const struct {
9776 u32 offset0;
9777 u32 offset1;
9778 u32 mask;
9779 } reg_tbl[] = {
9780/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9781 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9782 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9783 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9784 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9785 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9786 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9787 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9788 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9789 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9790/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9791 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9792 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9793 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9794 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9795 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9796 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9797 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 9798 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
9799 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9800/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
9801 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9802 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9803 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9804 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9805 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9806 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9807 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9808 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
9809 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9810/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
9811 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9812 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9813 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9814 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9815 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9816 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9817
9818 { 0xffffffff, 0, 0x00000000 }
9819 };
9820
9821 if (!netif_running(bp->dev))
9822 return rc;
9823
9824 /* Repeat the test twice:
9825 First by writing 0x00000000, second by writing 0xffffffff */
9826 for (idx = 0; idx < 2; idx++) {
9827
9828 switch (idx) {
9829 case 0:
9830 wr_val = 0;
9831 break;
9832 case 1:
9833 wr_val = 0xffffffff;
9834 break;
9835 }
9836
9837 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9838 u32 offset, mask, save_val, val;
f3c87cdd
YG
9839
9840 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9841 mask = reg_tbl[i].mask;
9842
9843 save_val = REG_RD(bp, offset);
9844
9845 REG_WR(bp, offset, wr_val);
9846 val = REG_RD(bp, offset);
9847
9848 /* Restore the original register's value */
9849 REG_WR(bp, offset, save_val);
9850
9851 /* verify that value is as expected value */
9852 if ((val & mask) != (wr_val & mask))
9853 goto test_reg_exit;
9854 }
9855 }
9856
9857 rc = 0;
9858
9859test_reg_exit:
9860 return rc;
9861}
9862
9863static int bnx2x_test_memory(struct bnx2x *bp)
9864{
9865 int i, j, rc = -ENODEV;
9866 u32 val;
9867 static const struct {
9868 u32 offset;
9869 int size;
9870 } mem_tbl[] = {
9871 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9872 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9873 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9874 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9875 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9876 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9877 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9878
9879 { 0xffffffff, 0 }
9880 };
9881 static const struct {
9882 char *name;
9883 u32 offset;
9dabc424
YG
9884 u32 e1_mask;
9885 u32 e1h_mask;
f3c87cdd 9886 } prty_tbl[] = {
9dabc424
YG
9887 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9888 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9889 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9890 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9891 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9892 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9893
9894 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9895 };
9896
9897 if (!netif_running(bp->dev))
9898 return rc;
9899
9900 /* Go through all the memories */
9901 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9902 for (j = 0; j < mem_tbl[i].size; j++)
9903 REG_RD(bp, mem_tbl[i].offset + j*4);
9904
9905 /* Check the parity status */
9906 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9907 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9908 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9909 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9910 DP(NETIF_MSG_HW,
9911 "%s is 0x%x\n", prty_tbl[i].name, val);
9912 goto test_mem_exit;
9913 }
9914 }
9915
9916 rc = 0;
9917
9918test_mem_exit:
9919 return rc;
9920}
9921
f3c87cdd
YG
9922static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9923{
9924 int cnt = 1000;
9925
9926 if (link_up)
9927 while (bnx2x_link_test(bp) && cnt--)
9928 msleep(10);
9929}
9930
9931static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9932{
9933 unsigned int pkt_size, num_pkts, i;
9934 struct sk_buff *skb;
9935 unsigned char *packet;
ca00392c
EG
9936 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9937 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
9938 u16 tx_start_idx, tx_idx;
9939 u16 rx_start_idx, rx_idx;
ca00392c 9940 u16 pkt_prod, bd_prod;
f3c87cdd 9941 struct sw_tx_bd *tx_buf;
ca00392c
EG
9942 struct eth_tx_start_bd *tx_start_bd;
9943 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
9944 dma_addr_t mapping;
9945 union eth_rx_cqe *cqe;
9946 u8 cqe_fp_flags;
9947 struct sw_rx_bd *rx_buf;
9948 u16 len;
9949 int rc = -ENODEV;
9950
b5bf9068
EG
9951 /* check the loopback mode */
9952 switch (loopback_mode) {
9953 case BNX2X_PHY_LOOPBACK:
9954 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9955 return -EINVAL;
9956 break;
9957 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9958 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9959 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9960 break;
9961 default:
f3c87cdd 9962 return -EINVAL;
b5bf9068 9963 }
f3c87cdd 9964
b5bf9068
EG
9965 /* prepare the loopback packet */
9966 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9967 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9968 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9969 if (!skb) {
9970 rc = -ENOMEM;
9971 goto test_loopback_exit;
9972 }
9973 packet = skb_put(skb, pkt_size);
9974 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
9975 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9976 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
9977 for (i = ETH_HLEN; i < pkt_size; i++)
9978 packet[i] = (unsigned char) (i & 0xff);
9979
b5bf9068 9980 /* send the loopback packet */
f3c87cdd 9981 num_pkts = 0;
ca00392c
EG
9982 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9983 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 9984
ca00392c
EG
9985 pkt_prod = fp_tx->tx_pkt_prod++;
9986 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9987 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 9988 tx_buf->skb = skb;
ca00392c 9989 tx_buf->flags = 0;
f3c87cdd 9990
ca00392c
EG
9991 bd_prod = TX_BD(fp_tx->tx_bd_prod);
9992 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
9993 mapping = pci_map_single(bp->pdev, skb->data,
9994 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
9995 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9996 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9997 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9998 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9999 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10000 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10001 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10002 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10003
10004 /* turn on parsing and get a BD */
10005 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10006 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10007
10008 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10009
58f4c4cf
EG
10010 wmb();
10011
ca00392c
EG
10012 fp_tx->tx_db.data.prod += 2;
10013 barrier();
10014 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
10015
10016 mmiowb();
10017
10018 num_pkts++;
ca00392c 10019 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10020 bp->dev->trans_start = jiffies;
10021
10022 udelay(100);
10023
ca00392c 10024 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10025 if (tx_idx != tx_start_idx + num_pkts)
10026 goto test_loopback_exit;
10027
ca00392c 10028 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10029 if (rx_idx != rx_start_idx + num_pkts)
10030 goto test_loopback_exit;
10031
ca00392c 10032 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10033 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10034 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10035 goto test_loopback_rx_exit;
10036
10037 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10038 if (len != pkt_size)
10039 goto test_loopback_rx_exit;
10040
ca00392c 10041 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10042 skb = rx_buf->skb;
10043 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10044 for (i = ETH_HLEN; i < pkt_size; i++)
10045 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10046 goto test_loopback_rx_exit;
10047
10048 rc = 0;
10049
10050test_loopback_rx_exit:
f3c87cdd 10051
ca00392c
EG
10052 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10053 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10054 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10055 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10056
10057 /* Update producers */
ca00392c
EG
10058 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10059 fp_rx->rx_sge_prod);
f3c87cdd
YG
10060
10061test_loopback_exit:
10062 bp->link_params.loopback_mode = LOOPBACK_NONE;
10063
10064 return rc;
10065}
10066
10067static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10068{
b5bf9068 10069 int rc = 0, res;
f3c87cdd
YG
10070
10071 if (!netif_running(bp->dev))
10072 return BNX2X_LOOPBACK_FAILED;
10073
f8ef6e44 10074 bnx2x_netif_stop(bp, 1);
3910c8ae 10075 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10076
b5bf9068
EG
10077 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10078 if (res) {
10079 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10080 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10081 }
10082
b5bf9068
EG
10083 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10084 if (res) {
10085 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10086 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10087 }
10088
3910c8ae 10089 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10090 bnx2x_netif_start(bp);
10091
10092 return rc;
10093}
10094
10095#define CRC32_RESIDUAL 0xdebb20e3
10096
10097static int bnx2x_test_nvram(struct bnx2x *bp)
10098{
10099 static const struct {
10100 int offset;
10101 int size;
10102 } nvram_tbl[] = {
10103 { 0, 0x14 }, /* bootstrap */
10104 { 0x14, 0xec }, /* dir */
10105 { 0x100, 0x350 }, /* manuf_info */
10106 { 0x450, 0xf0 }, /* feature_info */
10107 { 0x640, 0x64 }, /* upgrade_key_info */
10108 { 0x6a4, 0x64 },
10109 { 0x708, 0x70 }, /* manuf_key_info */
10110 { 0x778, 0x70 },
10111 { 0, 0 }
10112 };
4781bfad 10113 __be32 buf[0x350 / 4];
f3c87cdd
YG
10114 u8 *data = (u8 *)buf;
10115 int i, rc;
10116 u32 magic, csum;
10117
10118 rc = bnx2x_nvram_read(bp, 0, data, 4);
10119 if (rc) {
f5372251 10120 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10121 goto test_nvram_exit;
10122 }
10123
10124 magic = be32_to_cpu(buf[0]);
10125 if (magic != 0x669955aa) {
10126 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10127 rc = -ENODEV;
10128 goto test_nvram_exit;
10129 }
10130
10131 for (i = 0; nvram_tbl[i].size; i++) {
10132
10133 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10134 nvram_tbl[i].size);
10135 if (rc) {
10136 DP(NETIF_MSG_PROBE,
f5372251 10137 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10138 goto test_nvram_exit;
10139 }
10140
10141 csum = ether_crc_le(nvram_tbl[i].size, data);
10142 if (csum != CRC32_RESIDUAL) {
10143 DP(NETIF_MSG_PROBE,
10144 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10145 rc = -ENODEV;
10146 goto test_nvram_exit;
10147 }
10148 }
10149
10150test_nvram_exit:
10151 return rc;
10152}
10153
10154static int bnx2x_test_intr(struct bnx2x *bp)
10155{
10156 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10157 int i, rc;
10158
10159 if (!netif_running(bp->dev))
10160 return -ENODEV;
10161
8d9c5f34 10162 config->hdr.length = 0;
af246401
EG
10163 if (CHIP_IS_E1(bp))
10164 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10165 else
10166 config->hdr.offset = BP_FUNC(bp);
0626b899 10167 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd
YG
10168 config->hdr.reserved1 = 0;
10169
10170 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10171 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10172 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10173 if (rc == 0) {
10174 bp->set_mac_pending++;
10175 for (i = 0; i < 10; i++) {
10176 if (!bp->set_mac_pending)
10177 break;
10178 msleep_interruptible(10);
10179 }
10180 if (i == 10)
10181 rc = -ENODEV;
10182 }
10183
10184 return rc;
10185}
10186
a2fbb9ea
ET
10187static void bnx2x_self_test(struct net_device *dev,
10188 struct ethtool_test *etest, u64 *buf)
10189{
10190 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
10191
10192 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10193
f3c87cdd 10194 if (!netif_running(dev))
a2fbb9ea 10195 return;
a2fbb9ea 10196
33471629 10197 /* offline tests are not supported in MF mode */
f3c87cdd
YG
10198 if (IS_E1HMF(bp))
10199 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10200
10201 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5
EG
10202 int port = BP_PORT(bp);
10203 u32 val;
f3c87cdd
YG
10204 u8 link_up;
10205
279abdf5
EG
10206 /* save current value of input enable for TX port IF */
10207 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10208 /* disable input for TX port IF */
10209 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10210
f3c87cdd
YG
10211 link_up = bp->link_vars.link_up;
10212 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10213 bnx2x_nic_load(bp, LOAD_DIAG);
10214 /* wait until link state is restored */
10215 bnx2x_wait_for_link(bp, link_up);
10216
10217 if (bnx2x_test_registers(bp) != 0) {
10218 buf[0] = 1;
10219 etest->flags |= ETH_TEST_FL_FAILED;
10220 }
10221 if (bnx2x_test_memory(bp) != 0) {
10222 buf[1] = 1;
10223 etest->flags |= ETH_TEST_FL_FAILED;
10224 }
10225 buf[2] = bnx2x_test_loopback(bp, link_up);
10226 if (buf[2] != 0)
10227 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10228
f3c87cdd 10229 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5
EG
10230
10231 /* restore input for TX port IF */
10232 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10233
f3c87cdd
YG
10234 bnx2x_nic_load(bp, LOAD_NORMAL);
10235 /* wait until link state is restored */
10236 bnx2x_wait_for_link(bp, link_up);
10237 }
10238 if (bnx2x_test_nvram(bp) != 0) {
10239 buf[3] = 1;
a2fbb9ea
ET
10240 etest->flags |= ETH_TEST_FL_FAILED;
10241 }
f3c87cdd
YG
10242 if (bnx2x_test_intr(bp) != 0) {
10243 buf[4] = 1;
10244 etest->flags |= ETH_TEST_FL_FAILED;
10245 }
10246 if (bp->port.pmf)
10247 if (bnx2x_link_test(bp) != 0) {
10248 buf[5] = 1;
10249 etest->flags |= ETH_TEST_FL_FAILED;
10250 }
f3c87cdd
YG
10251
10252#ifdef BNX2X_EXTRA_DEBUG
10253 bnx2x_panic_dump(bp);
10254#endif
a2fbb9ea
ET
10255}
10256
de832a55
EG
10257static const struct {
10258 long offset;
10259 int size;
10260 u8 string[ETH_GSTRING_LEN];
10261} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10262/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10263 { Q_STATS_OFFSET32(error_bytes_received_hi),
10264 8, "[%d]: rx_error_bytes" },
10265 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10266 8, "[%d]: rx_ucast_packets" },
10267 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10268 8, "[%d]: rx_mcast_packets" },
10269 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10270 8, "[%d]: rx_bcast_packets" },
10271 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10272 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10273 4, "[%d]: rx_phy_ip_err_discards"},
10274 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10275 4, "[%d]: rx_skb_alloc_discard" },
10276 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10277
10278/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10279 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10280 8, "[%d]: tx_packets" }
10281};
10282
bb2a0f7a
YG
10283static const struct {
10284 long offset;
10285 int size;
10286 u32 flags;
66e855f3
YG
10287#define STATS_FLAGS_PORT 1
10288#define STATS_FLAGS_FUNC 2
de832a55 10289#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10290 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10291} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
10292/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10293 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10294 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10295 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10296 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10297 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10298 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10299 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10300 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10301 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10302 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10303 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10304 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10305 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
10306 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10307 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10308 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10309 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10310/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10311 8, STATS_FLAGS_PORT, "rx_fragments" },
10312 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10313 8, STATS_FLAGS_PORT, "rx_jabbers" },
10314 { STATS_OFFSET32(no_buff_discard_hi),
10315 8, STATS_FLAGS_BOTH, "rx_discards" },
10316 { STATS_OFFSET32(mac_filter_discard),
10317 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10318 { STATS_OFFSET32(xxoverflow_discard),
10319 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10320 { STATS_OFFSET32(brb_drop_hi),
10321 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10322 { STATS_OFFSET32(brb_truncate_hi),
10323 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10324 { STATS_OFFSET32(pause_frames_received_hi),
10325 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10326 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10327 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10328 { STATS_OFFSET32(nig_timer_max),
10329 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10330/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10331 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10332 { STATS_OFFSET32(rx_skb_alloc_failed),
10333 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10334 { STATS_OFFSET32(hw_csum_err),
10335 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10336
10337 { STATS_OFFSET32(total_bytes_transmitted_hi),
10338 8, STATS_FLAGS_BOTH, "tx_bytes" },
10339 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10340 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10341 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10342 8, STATS_FLAGS_BOTH, "tx_packets" },
10343 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10344 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10345 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10346 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10347 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10348 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10349 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10350 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10351/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10352 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10353 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10354 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10355 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10356 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10357 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10358 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10359 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10360 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10361 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10362 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10363 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10364 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10365 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10366 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10367 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10368 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10369 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10370 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10371/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10372 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
10373 { STATS_OFFSET32(pause_frames_sent_hi),
10374 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
10375};
10376
de832a55
EG
10377#define IS_PORT_STAT(i) \
10378 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10379#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10380#define IS_E1HMF_MODE_STAT(bp) \
10381 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10382
a2fbb9ea
ET
10383static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10384{
bb2a0f7a 10385 struct bnx2x *bp = netdev_priv(dev);
de832a55 10386 int i, j, k;
bb2a0f7a 10387
a2fbb9ea
ET
10388 switch (stringset) {
10389 case ETH_SS_STATS:
de832a55
EG
10390 if (is_multi(bp)) {
10391 k = 0;
ca00392c 10392 for_each_rx_queue(bp, i) {
de832a55
EG
10393 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10394 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10395 bnx2x_q_stats_arr[j].string, i);
10396 k += BNX2X_NUM_Q_STATS;
10397 }
10398 if (IS_E1HMF_MODE_STAT(bp))
10399 break;
10400 for (j = 0; j < BNX2X_NUM_STATS; j++)
10401 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10402 bnx2x_stats_arr[j].string);
10403 } else {
10404 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10405 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10406 continue;
10407 strcpy(buf + j*ETH_GSTRING_LEN,
10408 bnx2x_stats_arr[i].string);
10409 j++;
10410 }
bb2a0f7a 10411 }
a2fbb9ea
ET
10412 break;
10413
10414 case ETH_SS_TEST:
10415 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10416 break;
10417 }
10418}
10419
10420static int bnx2x_get_stats_count(struct net_device *dev)
10421{
bb2a0f7a 10422 struct bnx2x *bp = netdev_priv(dev);
de832a55 10423 int i, num_stats;
bb2a0f7a 10424
de832a55 10425 if (is_multi(bp)) {
ca00392c 10426 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
de832a55
EG
10427 if (!IS_E1HMF_MODE_STAT(bp))
10428 num_stats += BNX2X_NUM_STATS;
10429 } else {
10430 if (IS_E1HMF_MODE_STAT(bp)) {
10431 num_stats = 0;
10432 for (i = 0; i < BNX2X_NUM_STATS; i++)
10433 if (IS_FUNC_STAT(i))
10434 num_stats++;
10435 } else
10436 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 10437 }
de832a55 10438
bb2a0f7a 10439 return num_stats;
a2fbb9ea
ET
10440}
10441
10442static void bnx2x_get_ethtool_stats(struct net_device *dev,
10443 struct ethtool_stats *stats, u64 *buf)
10444{
10445 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
10446 u32 *hw_stats, *offset;
10447 int i, j, k;
bb2a0f7a 10448
de832a55
EG
10449 if (is_multi(bp)) {
10450 k = 0;
ca00392c 10451 for_each_rx_queue(bp, i) {
de832a55
EG
10452 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10453 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10454 if (bnx2x_q_stats_arr[j].size == 0) {
10455 /* skip this counter */
10456 buf[k + j] = 0;
10457 continue;
10458 }
10459 offset = (hw_stats +
10460 bnx2x_q_stats_arr[j].offset);
10461 if (bnx2x_q_stats_arr[j].size == 4) {
10462 /* 4-byte counter */
10463 buf[k + j] = (u64) *offset;
10464 continue;
10465 }
10466 /* 8-byte counter */
10467 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10468 }
10469 k += BNX2X_NUM_Q_STATS;
10470 }
10471 if (IS_E1HMF_MODE_STAT(bp))
10472 return;
10473 hw_stats = (u32 *)&bp->eth_stats;
10474 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10475 if (bnx2x_stats_arr[j].size == 0) {
10476 /* skip this counter */
10477 buf[k + j] = 0;
10478 continue;
10479 }
10480 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10481 if (bnx2x_stats_arr[j].size == 4) {
10482 /* 4-byte counter */
10483 buf[k + j] = (u64) *offset;
10484 continue;
10485 }
10486 /* 8-byte counter */
10487 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10488 }
de832a55
EG
10489 } else {
10490 hw_stats = (u32 *)&bp->eth_stats;
10491 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10492 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10493 continue;
10494 if (bnx2x_stats_arr[i].size == 0) {
10495 /* skip this counter */
10496 buf[j] = 0;
10497 j++;
10498 continue;
10499 }
10500 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10501 if (bnx2x_stats_arr[i].size == 4) {
10502 /* 4-byte counter */
10503 buf[j] = (u64) *offset;
10504 j++;
10505 continue;
10506 }
10507 /* 8-byte counter */
10508 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10509 j++;
a2fbb9ea 10510 }
a2fbb9ea
ET
10511 }
10512}
10513
10514static int bnx2x_phys_id(struct net_device *dev, u32 data)
10515{
10516 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10517 int port = BP_PORT(bp);
a2fbb9ea
ET
10518 int i;
10519
34f80b04
EG
10520 if (!netif_running(dev))
10521 return 0;
10522
10523 if (!bp->port.pmf)
10524 return 0;
10525
a2fbb9ea
ET
10526 if (data == 0)
10527 data = 2;
10528
10529 for (i = 0; i < (data * 2); i++) {
c18487ee 10530 if ((i % 2) == 0)
34f80b04 10531 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
10532 bp->link_params.hw_led_mode,
10533 bp->link_params.chip_id);
10534 else
34f80b04 10535 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
10536 bp->link_params.hw_led_mode,
10537 bp->link_params.chip_id);
10538
a2fbb9ea
ET
10539 msleep_interruptible(500);
10540 if (signal_pending(current))
10541 break;
10542 }
10543
c18487ee 10544 if (bp->link_vars.link_up)
34f80b04 10545 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
10546 bp->link_vars.line_speed,
10547 bp->link_params.hw_led_mode,
10548 bp->link_params.chip_id);
a2fbb9ea
ET
10549
10550 return 0;
10551}
10552
10553static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
10554 .get_settings = bnx2x_get_settings,
10555 .set_settings = bnx2x_set_settings,
10556 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57
EG
10557 .get_regs_len = bnx2x_get_regs_len,
10558 .get_regs = bnx2x_get_regs,
a2fbb9ea
ET
10559 .get_wol = bnx2x_get_wol,
10560 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
10561 .get_msglevel = bnx2x_get_msglevel,
10562 .set_msglevel = bnx2x_set_msglevel,
10563 .nway_reset = bnx2x_nway_reset,
01e53298 10564 .get_link = bnx2x_get_link,
7a9b2557
VZ
10565 .get_eeprom_len = bnx2x_get_eeprom_len,
10566 .get_eeprom = bnx2x_get_eeprom,
10567 .set_eeprom = bnx2x_set_eeprom,
10568 .get_coalesce = bnx2x_get_coalesce,
10569 .set_coalesce = bnx2x_set_coalesce,
10570 .get_ringparam = bnx2x_get_ringparam,
10571 .set_ringparam = bnx2x_set_ringparam,
10572 .get_pauseparam = bnx2x_get_pauseparam,
10573 .set_pauseparam = bnx2x_set_pauseparam,
10574 .get_rx_csum = bnx2x_get_rx_csum,
10575 .set_rx_csum = bnx2x_set_rx_csum,
10576 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10577 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
10578 .set_flags = bnx2x_set_flags,
10579 .get_flags = ethtool_op_get_flags,
10580 .get_sg = ethtool_op_get_sg,
10581 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
10582 .get_tso = ethtool_op_get_tso,
10583 .set_tso = bnx2x_set_tso,
10584 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
10585 .self_test = bnx2x_self_test,
10586 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
10587 .phys_id = bnx2x_phys_id,
10588 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10589 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
10590};
10591
10592/* end of ethtool_ops */
10593
10594/****************************************************************************
10595* General service functions
10596****************************************************************************/
10597
10598static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10599{
10600 u16 pmcsr;
10601
10602 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10603
10604 switch (state) {
10605 case PCI_D0:
34f80b04 10606 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
10607 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10608 PCI_PM_CTRL_PME_STATUS));
10609
10610 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10611 /* delay required during transition out of D3hot */
a2fbb9ea 10612 msleep(20);
34f80b04 10613 break;
a2fbb9ea 10614
34f80b04
EG
10615 case PCI_D3hot:
10616 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10617 pmcsr |= 3;
a2fbb9ea 10618
34f80b04
EG
10619 if (bp->wol)
10620 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10621
34f80b04
EG
10622 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10623 pmcsr);
a2fbb9ea 10624
34f80b04
EG
10625 /* No more memory access after this point until
10626 * device is brought back to D0.
10627 */
10628 break;
10629
10630 default:
10631 return -EINVAL;
10632 }
10633 return 0;
a2fbb9ea
ET
10634}
10635
237907c1
EG
10636static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10637{
10638 u16 rx_cons_sb;
10639
10640 /* Tell compiler that status block fields can change */
10641 barrier();
10642 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10643 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10644 rx_cons_sb++;
10645 return (fp->rx_comp_cons != rx_cons_sb);
10646}
10647
34f80b04
EG
10648/*
10649 * net_device service functions
10650 */
10651
a2fbb9ea
ET
10652static int bnx2x_poll(struct napi_struct *napi, int budget)
10653{
10654 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10655 napi);
10656 struct bnx2x *bp = fp->bp;
10657 int work_done = 0;
10658
10659#ifdef BNX2X_STOP_ON_ERROR
10660 if (unlikely(bp->panic))
34f80b04 10661 goto poll_panic;
a2fbb9ea
ET
10662#endif
10663
a2fbb9ea
ET
10664 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10665 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10666
10667 bnx2x_update_fpsb_idx(fp);
10668
8534f32c 10669 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10670 work_done = bnx2x_rx_int(fp, budget);
356e2385 10671
8534f32c
EG
10672 /* must not complete if we consumed full budget */
10673 if (work_done >= budget)
10674 goto poll_again;
10675 }
a2fbb9ea 10676
ca00392c 10677 /* bnx2x_has_rx_work() reads the status block, thus we need to
8534f32c 10678 * ensure that status block indices have been actually read
ca00392c 10679 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
8534f32c 10680 * so that we won't write the "newer" value of the status block to IGU
ca00392c 10681 * (if there was a DMA right after bnx2x_has_rx_work and
8534f32c
EG
10682 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10683 * may be postponed to right before bnx2x_ack_sb). In this case
10684 * there will never be another interrupt until there is another update
10685 * of the status block, while there is still unhandled work.
10686 */
10687 rmb();
a2fbb9ea 10688
ca00392c 10689 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 10690#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10691poll_panic:
a2fbb9ea 10692#endif
288379f0 10693 napi_complete(napi);
a2fbb9ea 10694
0626b899 10695 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10696 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10697 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea
ET
10698 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10699 }
356e2385 10700
8534f32c 10701poll_again:
a2fbb9ea
ET
10702 return work_done;
10703}
10704
755735eb
EG
10705
10706/* we split the first BD into headers and data BDs
33471629 10707 * to ease the pain of our fellow microcode engineers
755735eb
EG
10708 * we use one mapping for both BDs
10709 * So far this has only been observed to happen
10710 * in Other Operating Systems(TM)
10711 */
10712static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10713 struct bnx2x_fastpath *fp,
ca00392c
EG
10714 struct sw_tx_bd *tx_buf,
10715 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb
EG
10716 u16 bd_prod, int nbd)
10717{
ca00392c 10718 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb
EG
10719 struct eth_tx_bd *d_tx_bd;
10720 dma_addr_t mapping;
10721 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10722
10723 /* first fix first BD */
10724 h_tx_bd->nbd = cpu_to_le16(nbd);
10725 h_tx_bd->nbytes = cpu_to_le16(hlen);
10726
10727 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10728 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10729 h_tx_bd->addr_lo, h_tx_bd->nbd);
10730
10731 /* now get a new data BD
10732 * (after the pbd) and fill it */
10733 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 10734 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb
EG
10735
10736 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10737 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10738
10739 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10740 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10741 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c
EG
10742
10743 /* this marks the BD as one that has no individual mapping */
10744 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10745
755735eb
EG
10746 DP(NETIF_MSG_TX_QUEUED,
10747 "TSO split data size is %d (%x:%x)\n",
10748 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10749
ca00392c
EG
10750 /* update tx_bd */
10751 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb
EG
10752
10753 return bd_prod;
10754}
10755
10756static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10757{
10758 if (fix > 0)
10759 csum = (u16) ~csum_fold(csum_sub(csum,
10760 csum_partial(t_header - fix, fix, 0)));
10761
10762 else if (fix < 0)
10763 csum = (u16) ~csum_fold(csum_add(csum,
10764 csum_partial(t_header, -fix, 0)));
10765
10766 return swab16(csum);
10767}
10768
10769static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10770{
10771 u32 rc;
10772
10773 if (skb->ip_summed != CHECKSUM_PARTIAL)
10774 rc = XMIT_PLAIN;
10775
10776 else {
4781bfad 10777 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb
EG
10778 rc = XMIT_CSUM_V6;
10779 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10780 rc |= XMIT_CSUM_TCP;
10781
10782 } else {
10783 rc = XMIT_CSUM_V4;
10784 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10785 rc |= XMIT_CSUM_TCP;
10786 }
10787 }
10788
10789 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10790 rc |= XMIT_GSO_V4;
10791
10792 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10793 rc |= XMIT_GSO_V6;
10794
10795 return rc;
10796}
10797
632da4d6 10798#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10799/* check if packet requires linearization (packet is too fragmented)
10800 no need to check fragmentation if page size > 8K (there will be no
10801 violation to FW restrictions) */
755735eb
EG
10802static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10803 u32 xmit_type)
10804{
10805 int to_copy = 0;
10806 int hlen = 0;
10807 int first_bd_sz = 0;
10808
10809 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10810 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10811
10812 if (xmit_type & XMIT_GSO) {
10813 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10814 /* Check if LSO packet needs to be copied:
10815 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10816 int wnd_size = MAX_FETCH_BD - 3;
33471629 10817 /* Number of windows to check */
755735eb
EG
10818 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10819 int wnd_idx = 0;
10820 int frag_idx = 0;
10821 u32 wnd_sum = 0;
10822
10823 /* Headers length */
10824 hlen = (int)(skb_transport_header(skb) - skb->data) +
10825 tcp_hdrlen(skb);
10826
10827 /* Amount of data (w/o headers) on linear part of SKB*/
10828 first_bd_sz = skb_headlen(skb) - hlen;
10829
10830 wnd_sum = first_bd_sz;
10831
10832 /* Calculate the first sum - it's special */
10833 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10834 wnd_sum +=
10835 skb_shinfo(skb)->frags[frag_idx].size;
10836
10837 /* If there was data on linear skb data - check it */
10838 if (first_bd_sz > 0) {
10839 if (unlikely(wnd_sum < lso_mss)) {
10840 to_copy = 1;
10841 goto exit_lbl;
10842 }
10843
10844 wnd_sum -= first_bd_sz;
10845 }
10846
10847 /* Others are easier: run through the frag list and
10848 check all windows */
10849 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10850 wnd_sum +=
10851 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10852
10853 if (unlikely(wnd_sum < lso_mss)) {
10854 to_copy = 1;
10855 break;
10856 }
10857 wnd_sum -=
10858 skb_shinfo(skb)->frags[wnd_idx].size;
10859 }
755735eb
EG
10860 } else {
10861 /* in non-LSO too fragmented packet should always
10862 be linearized */
10863 to_copy = 1;
10864 }
10865 }
10866
10867exit_lbl:
10868 if (unlikely(to_copy))
10869 DP(NETIF_MSG_TX_QUEUED,
10870 "Linearization IS REQUIRED for %s packet. "
10871 "num_frags %d hlen %d first_bd_sz %d\n",
10872 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10873 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10874
10875 return to_copy;
10876}
632da4d6 10877#endif
755735eb
EG
10878
10879/* called with netif_tx_lock
a2fbb9ea 10880 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10881 * netif_wake_queue()
a2fbb9ea
ET
10882 */
10883static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10884{
10885 struct bnx2x *bp = netdev_priv(dev);
ca00392c 10886 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 10887 struct netdev_queue *txq;
a2fbb9ea 10888 struct sw_tx_bd *tx_buf;
ca00392c
EG
10889 struct eth_tx_start_bd *tx_start_bd;
10890 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea
ET
10891 struct eth_tx_parse_bd *pbd = NULL;
10892 u16 pkt_prod, bd_prod;
755735eb 10893 int nbd, fp_index;
a2fbb9ea 10894 dma_addr_t mapping;
755735eb 10895 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb
EG
10896 int i;
10897 u8 hlen = 0;
ca00392c 10898 __le16 pkt_size = 0;
a2fbb9ea
ET
10899
10900#ifdef BNX2X_STOP_ON_ERROR
10901 if (unlikely(bp->panic))
10902 return NETDEV_TX_BUSY;
10903#endif
10904
555f6c78
EG
10905 fp_index = skb_get_queue_mapping(skb);
10906 txq = netdev_get_tx_queue(dev, fp_index);
10907
ca00392c
EG
10908 fp = &bp->fp[fp_index + bp->num_rx_queues];
10909 fp_stat = &bp->fp[fp_index];
755735eb 10910
231fd58a 10911 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 10912 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 10913 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10914 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10915 return NETDEV_TX_BUSY;
10916 }
10917
755735eb
EG
10918 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10919 " gso type %x xmit_type %x\n",
10920 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10921 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10922
632da4d6 10923#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251
EG
10924 /* First, check if we need to linearize the skb (due to FW
10925 restrictions). No need to check fragmentation if page size > 8K
10926 (there will be no violation to FW restrictions) */
755735eb
EG
10927 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10928 /* Statistics of linearization */
10929 bp->lin_cnt++;
10930 if (skb_linearize(skb) != 0) {
10931 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10932 "silently dropping this SKB\n");
10933 dev_kfree_skb_any(skb);
da5a662a 10934 return NETDEV_TX_OK;
755735eb
EG
10935 }
10936 }
632da4d6 10937#endif
755735eb 10938
a2fbb9ea 10939 /*
755735eb 10940 Please read carefully. First we use one BD which we mark as start,
ca00392c 10941 then we have a parsing info BD (used for TSO or xsum),
755735eb 10942 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10943 (don't forget to mark the last one as last,
10944 and to unmap only AFTER you write to the BD ...)
755735eb 10945 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10946 */
10947
10948 pkt_prod = fp->tx_pkt_prod++;
755735eb 10949 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10950
755735eb 10951 /* get a tx_buf and first BD */
a2fbb9ea 10952 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 10953 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 10954
ca00392c
EG
10955 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10956 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10957 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 10958 /* header nbd */
ca00392c 10959 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10960
755735eb
EG
10961 /* remember the first BD of the packet */
10962 tx_buf->first_bd = fp->tx_bd_prod;
10963 tx_buf->skb = skb;
ca00392c 10964 tx_buf->flags = 0;
a2fbb9ea
ET
10965
10966 DP(NETIF_MSG_TX_QUEUED,
10967 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 10968 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 10969
0c6671b0
EG
10970#ifdef BCM_VLAN
10971 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10972 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c
EG
10973 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10974 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 10975 } else
0c6671b0 10976#endif
ca00392c 10977 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10978
ca00392c
EG
10979 /* turn on parsing and get a BD */
10980 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10981 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 10982
ca00392c 10983 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb
EG
10984
10985 if (xmit_type & XMIT_CSUM) {
ca00392c 10986 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
10987
10988 /* for now NS flag is not used in Linux */
4781bfad
EG
10989 pbd->global_data =
10990 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10991 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10992
755735eb
EG
10993 pbd->ip_hlen = (skb_transport_header(skb) -
10994 skb_network_header(skb)) / 2;
10995
10996 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10997
755735eb 10998 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 10999 hlen = hlen*2;
a2fbb9ea 11000
ca00392c 11001 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb
EG
11002
11003 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11004 tx_start_bd->bd_flags.as_bitfield |=
755735eb
EG
11005 ETH_TX_BD_FLAGS_IP_CSUM;
11006 else
ca00392c
EG
11007 tx_start_bd->bd_flags.as_bitfield |=
11008 ETH_TX_BD_FLAGS_IPV6;
755735eb
EG
11009
11010 if (xmit_type & XMIT_CSUM_TCP) {
11011 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11012
11013 } else {
11014 s8 fix = SKB_CS_OFF(skb); /* signed! */
11015
ca00392c 11016 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11017
755735eb 11018 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11019 "hlen %d fix %d csum before fix %x\n",
11020 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb
EG
11021
11022 /* HW bug: fixup the CSUM */
11023 pbd->tcp_pseudo_csum =
11024 bnx2x_csum_fix(skb_transport_header(skb),
11025 SKB_CS(skb), fix);
11026
11027 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11028 pbd->tcp_pseudo_csum);
11029 }
a2fbb9ea
ET
11030 }
11031
11032 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11033 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11034
ca00392c
EG
11035 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11036 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11037 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11038 tx_start_bd->nbd = cpu_to_le16(nbd);
11039 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11040 pkt_size = tx_start_bd->nbytes;
a2fbb9ea
ET
11041
11042 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11043 " nbytes %d flags %x vlan %x\n",
ca00392c
EG
11044 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11045 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11046 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11047
755735eb 11048 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
11049
11050 DP(NETIF_MSG_TX_QUEUED,
11051 "TSO packet len %d hlen %d total len %d tso size %d\n",
11052 skb->len, hlen, skb_headlen(skb),
11053 skb_shinfo(skb)->gso_size);
11054
ca00392c 11055 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11056
755735eb 11057 if (unlikely(skb_headlen(skb) > hlen))
ca00392c
EG
11058 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11059 hlen, bd_prod, ++nbd);
a2fbb9ea
ET
11060
11061 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11062 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
11063 pbd->tcp_flags = pbd_tcp_flags(skb);
11064
11065 if (xmit_type & XMIT_GSO_V4) {
11066 pbd->ip_id = swab16(ip_hdr(skb)->id);
11067 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
11068 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11069 ip_hdr(skb)->daddr,
11070 0, IPPROTO_TCP, 0));
755735eb
EG
11071
11072 } else
11073 pbd->tcp_pseudo_csum =
11074 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11075 &ipv6_hdr(skb)->daddr,
11076 0, IPPROTO_TCP, 0));
11077
a2fbb9ea
ET
11078 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11079 }
ca00392c 11080 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11081
755735eb
EG
11082 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11083 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11084
755735eb 11085 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c
EG
11086 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11087 if (total_pkt_bd == NULL)
11088 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11089
755735eb
EG
11090 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11091 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11092
ca00392c
EG
11093 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11094 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11095 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11096 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11097
755735eb 11098 DP(NETIF_MSG_TX_QUEUED,
ca00392c
EG
11099 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11100 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11101 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea
ET
11102 }
11103
ca00392c 11104 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11105
a2fbb9ea
ET
11106 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11107
755735eb 11108 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
11109 * if the packet contains or ends with it
11110 */
11111 if (TX_BD_POFF(bd_prod) < nbd)
11112 nbd++;
11113
ca00392c
EG
11114 if (total_pkt_bd != NULL)
11115 total_pkt_bd->total_pkt_bytes = pkt_size;
11116
a2fbb9ea
ET
11117 if (pbd)
11118 DP(NETIF_MSG_TX_QUEUED,
11119 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11120 " tcp_flags %x xsum %x seq %u hlen %u\n",
11121 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11122 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11123 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11124
755735eb 11125 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11126
58f4c4cf
EG
11127 /*
11128 * Make sure that the BD data is updated before updating the producer
11129 * since FW might read the BD right after the producer is updated.
11130 * This is only applicable for weak-ordered memory model archs such
11131 * as IA-64. The following barrier is also mandatory since FW will
11132 * assumes packets must have BDs.
11133 */
11134 wmb();
11135
ca00392c
EG
11136 fp->tx_db.data.prod += nbd;
11137 barrier();
11138 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea
ET
11139
11140 mmiowb();
11141
755735eb 11142 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
11143
11144 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11145 netif_tx_stop_queue(txq);
58f4c4cf
EG
11146 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11147 if we put Tx into XOFF state. */
11148 smp_mb();
ca00392c 11149 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11150 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11151 netif_tx_wake_queue(txq);
a2fbb9ea 11152 }
ca00392c 11153 fp_stat->tx_pkt++;
a2fbb9ea
ET
11154
11155 return NETDEV_TX_OK;
11156}
11157
bb2a0f7a 11158/* called with rtnl_lock */
a2fbb9ea
ET
11159static int bnx2x_open(struct net_device *dev)
11160{
11161 struct bnx2x *bp = netdev_priv(dev);
11162
6eccabb3
EG
11163 netif_carrier_off(dev);
11164
a2fbb9ea
ET
11165 bnx2x_set_power_state(bp, PCI_D0);
11166
bb2a0f7a 11167 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
11168}
11169
bb2a0f7a 11170/* called with rtnl_lock */
a2fbb9ea
ET
11171static int bnx2x_close(struct net_device *dev)
11172{
a2fbb9ea
ET
11173 struct bnx2x *bp = netdev_priv(dev);
11174
11175 /* Unload the driver, release IRQs */
bb2a0f7a
YG
11176 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11177 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11178 if (!CHIP_REV_IS_SLOW(bp))
11179 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
11180
11181 return 0;
11182}
11183
f5372251 11184/* called with netif_tx_lock from dev_mcast.c */
34f80b04
EG
11185static void bnx2x_set_rx_mode(struct net_device *dev)
11186{
11187 struct bnx2x *bp = netdev_priv(dev);
11188 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11189 int port = BP_PORT(bp);
11190
11191 if (bp->state != BNX2X_STATE_OPEN) {
11192 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11193 return;
11194 }
11195
11196 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11197
11198 if (dev->flags & IFF_PROMISC)
11199 rx_mode = BNX2X_RX_MODE_PROMISC;
11200
11201 else if ((dev->flags & IFF_ALLMULTI) ||
11202 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11203 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11204
11205 else { /* some multicasts */
11206 if (CHIP_IS_E1(bp)) {
11207 int i, old, offset;
11208 struct dev_mc_list *mclist;
11209 struct mac_configuration_cmd *config =
11210 bnx2x_sp(bp, mcast_config);
11211
11212 for (i = 0, mclist = dev->mc_list;
11213 mclist && (i < dev->mc_count);
11214 i++, mclist = mclist->next) {
11215
11216 config->config_table[i].
11217 cam_entry.msb_mac_addr =
11218 swab16(*(u16 *)&mclist->dmi_addr[0]);
11219 config->config_table[i].
11220 cam_entry.middle_mac_addr =
11221 swab16(*(u16 *)&mclist->dmi_addr[2]);
11222 config->config_table[i].
11223 cam_entry.lsb_mac_addr =
11224 swab16(*(u16 *)&mclist->dmi_addr[4]);
11225 config->config_table[i].cam_entry.flags =
11226 cpu_to_le16(port);
11227 config->config_table[i].
11228 target_table_entry.flags = 0;
ca00392c
EG
11229 config->config_table[i].target_table_entry.
11230 clients_bit_vector =
11231 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04
EG
11232 config->config_table[i].
11233 target_table_entry.vlan_id = 0;
11234
11235 DP(NETIF_MSG_IFUP,
11236 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11237 config->config_table[i].
11238 cam_entry.msb_mac_addr,
11239 config->config_table[i].
11240 cam_entry.middle_mac_addr,
11241 config->config_table[i].
11242 cam_entry.lsb_mac_addr);
11243 }
8d9c5f34 11244 old = config->hdr.length;
34f80b04
EG
11245 if (old > i) {
11246 for (; i < old; i++) {
11247 if (CAM_IS_INVALID(config->
11248 config_table[i])) {
af246401 11249 /* already invalidated */
34f80b04
EG
11250 break;
11251 }
11252 /* invalidate */
11253 CAM_INVALIDATE(config->
11254 config_table[i]);
11255 }
11256 }
11257
11258 if (CHIP_REV_IS_SLOW(bp))
11259 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11260 else
11261 offset = BNX2X_MAX_MULTICAST*(1 + port);
11262
8d9c5f34 11263 config->hdr.length = i;
34f80b04 11264 config->hdr.offset = offset;
8d9c5f34 11265 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
11266 config->hdr.reserved1 = 0;
11267
11268 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11269 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11270 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11271 0);
11272 } else { /* E1H */
11273 /* Accept one or more multicasts */
11274 struct dev_mc_list *mclist;
11275 u32 mc_filter[MC_HASH_SIZE];
11276 u32 crc, bit, regidx;
11277 int i;
11278
11279 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11280
11281 for (i = 0, mclist = dev->mc_list;
11282 mclist && (i < dev->mc_count);
11283 i++, mclist = mclist->next) {
11284
7c510e4b
JB
11285 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11286 mclist->dmi_addr);
34f80b04
EG
11287
11288 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11289 bit = (crc >> 24) & 0xff;
11290 regidx = bit >> 5;
11291 bit &= 0x1f;
11292 mc_filter[regidx] |= (1 << bit);
11293 }
11294
11295 for (i = 0; i < MC_HASH_SIZE; i++)
11296 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11297 mc_filter[i]);
11298 }
11299 }
11300
11301 bp->rx_mode = rx_mode;
11302 bnx2x_set_storm_rx_mode(bp);
11303}
11304
11305/* called with rtnl_lock */
a2fbb9ea
ET
11306static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11307{
11308 struct sockaddr *addr = p;
11309 struct bnx2x *bp = netdev_priv(dev);
11310
34f80b04 11311 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
11312 return -EINVAL;
11313
11314 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
11315 if (netif_running(dev)) {
11316 if (CHIP_IS_E1(bp))
3101c2bc 11317 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 11318 else
3101c2bc 11319 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 11320 }
a2fbb9ea
ET
11321
11322 return 0;
11323}
11324
c18487ee 11325/* called with rtnl_lock */
01cd4528
EG
11326static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11327 int devad, u16 addr)
a2fbb9ea 11328{
01cd4528
EG
11329 struct bnx2x *bp = netdev_priv(netdev);
11330 u16 value;
11331 int rc;
11332 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11333
01cd4528
EG
11334 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11335 prtad, devad, addr);
a2fbb9ea 11336
01cd4528
EG
11337 if (prtad != bp->mdio.prtad) {
11338 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11339 prtad, bp->mdio.prtad);
11340 return -EINVAL;
11341 }
11342
11343 /* The HW expects different devad if CL22 is used */
11344 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11345
01cd4528
EG
11346 bnx2x_acquire_phy_lock(bp);
11347 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11348 devad, addr, &value);
11349 bnx2x_release_phy_lock(bp);
11350 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11351
01cd4528
EG
11352 if (!rc)
11353 rc = value;
11354 return rc;
11355}
a2fbb9ea 11356
01cd4528
EG
11357/* called with rtnl_lock */
11358static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11359 u16 addr, u16 value)
11360{
11361 struct bnx2x *bp = netdev_priv(netdev);
11362 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11363 int rc;
11364
11365 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11366 " value 0x%x\n", prtad, devad, addr, value);
11367
11368 if (prtad != bp->mdio.prtad) {
11369 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11370 prtad, bp->mdio.prtad);
11371 return -EINVAL;
a2fbb9ea
ET
11372 }
11373
01cd4528
EG
11374 /* The HW expects different devad if CL22 is used */
11375 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11376
01cd4528
EG
11377 bnx2x_acquire_phy_lock(bp);
11378 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11379 devad, addr, value);
11380 bnx2x_release_phy_lock(bp);
11381 return rc;
11382}
c18487ee 11383
01cd4528
EG
11384/* called with rtnl_lock */
11385static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11386{
11387 struct bnx2x *bp = netdev_priv(dev);
11388 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11389
01cd4528
EG
11390 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11391 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11392
01cd4528
EG
11393 if (!netif_running(dev))
11394 return -EAGAIN;
11395
11396 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11397}
11398
34f80b04 11399/* called with rtnl_lock */
a2fbb9ea
ET
11400static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11401{
11402 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11403 int rc = 0;
a2fbb9ea
ET
11404
11405 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11406 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11407 return -EINVAL;
11408
11409 /* This does not race with packet allocation
c14423fe 11410 * because the actual alloc size is
a2fbb9ea
ET
11411 * only updated as part of load
11412 */
11413 dev->mtu = new_mtu;
11414
11415 if (netif_running(dev)) {
34f80b04
EG
11416 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11417 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11418 }
34f80b04
EG
11419
11420 return rc;
a2fbb9ea
ET
11421}
11422
11423static void bnx2x_tx_timeout(struct net_device *dev)
11424{
11425 struct bnx2x *bp = netdev_priv(dev);
11426
11427#ifdef BNX2X_STOP_ON_ERROR
11428 if (!bp->panic)
11429 bnx2x_panic();
11430#endif
11431 /* This allows the netif to be shutdown gracefully before resetting */
11432 schedule_work(&bp->reset_task);
11433}
11434
11435#ifdef BCM_VLAN
34f80b04 11436/* called with rtnl_lock */
a2fbb9ea
ET
11437static void bnx2x_vlan_rx_register(struct net_device *dev,
11438 struct vlan_group *vlgrp)
11439{
11440 struct bnx2x *bp = netdev_priv(dev);
11441
11442 bp->vlgrp = vlgrp;
0c6671b0
EG
11443
11444 /* Set flags according to the required capabilities */
11445 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11446
11447 if (dev->features & NETIF_F_HW_VLAN_TX)
11448 bp->flags |= HW_VLAN_TX_FLAG;
11449
11450 if (dev->features & NETIF_F_HW_VLAN_RX)
11451 bp->flags |= HW_VLAN_RX_FLAG;
11452
a2fbb9ea 11453 if (netif_running(dev))
49d66772 11454 bnx2x_set_client_config(bp);
a2fbb9ea 11455}
34f80b04 11456
a2fbb9ea
ET
11457#endif
11458
11459#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11460static void poll_bnx2x(struct net_device *dev)
11461{
11462 struct bnx2x *bp = netdev_priv(dev);
11463
11464 disable_irq(bp->pdev->irq);
11465 bnx2x_interrupt(bp->pdev->irq, dev);
11466 enable_irq(bp->pdev->irq);
11467}
11468#endif
11469
c64213cd
SH
11470static const struct net_device_ops bnx2x_netdev_ops = {
11471 .ndo_open = bnx2x_open,
11472 .ndo_stop = bnx2x_close,
11473 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11474 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd
SH
11475 .ndo_set_mac_address = bnx2x_change_mac_addr,
11476 .ndo_validate_addr = eth_validate_addr,
11477 .ndo_do_ioctl = bnx2x_ioctl,
11478 .ndo_change_mtu = bnx2x_change_mtu,
11479 .ndo_tx_timeout = bnx2x_tx_timeout,
11480#ifdef BCM_VLAN
11481 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11482#endif
11483#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11484 .ndo_poll_controller = poll_bnx2x,
11485#endif
11486};
11487
34f80b04
EG
11488static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11489 struct net_device *dev)
a2fbb9ea
ET
11490{
11491 struct bnx2x *bp;
11492 int rc;
11493
11494 SET_NETDEV_DEV(dev, &pdev->dev);
11495 bp = netdev_priv(dev);
11496
34f80b04
EG
11497 bp->dev = dev;
11498 bp->pdev = pdev;
a2fbb9ea 11499 bp->flags = 0;
34f80b04 11500 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
11501
11502 rc = pci_enable_device(pdev);
11503 if (rc) {
11504 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11505 goto err_out;
11506 }
11507
11508 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11509 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11510 " aborting\n");
11511 rc = -ENODEV;
11512 goto err_out_disable;
11513 }
11514
11515 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11516 printk(KERN_ERR PFX "Cannot find second PCI device"
11517 " base address, aborting\n");
11518 rc = -ENODEV;
11519 goto err_out_disable;
11520 }
11521
34f80b04
EG
11522 if (atomic_read(&pdev->enable_cnt) == 1) {
11523 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11524 if (rc) {
11525 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11526 " aborting\n");
11527 goto err_out_disable;
11528 }
a2fbb9ea 11529
34f80b04
EG
11530 pci_set_master(pdev);
11531 pci_save_state(pdev);
11532 }
a2fbb9ea
ET
11533
11534 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11535 if (bp->pm_cap == 0) {
11536 printk(KERN_ERR PFX "Cannot find power management"
11537 " capability, aborting\n");
11538 rc = -EIO;
11539 goto err_out_release;
11540 }
11541
11542 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11543 if (bp->pcie_cap == 0) {
11544 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11545 " aborting\n");
11546 rc = -EIO;
11547 goto err_out_release;
11548 }
11549
6a35528a 11550 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11551 bp->flags |= USING_DAC_FLAG;
6a35528a 11552 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea
ET
11553 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11554 " failed, aborting\n");
11555 rc = -EIO;
11556 goto err_out_release;
11557 }
11558
284901a9 11559 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea
ET
11560 printk(KERN_ERR PFX "System does not support DMA,"
11561 " aborting\n");
11562 rc = -EIO;
11563 goto err_out_release;
11564 }
11565
34f80b04
EG
11566 dev->mem_start = pci_resource_start(pdev, 0);
11567 dev->base_addr = dev->mem_start;
11568 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
11569
11570 dev->irq = pdev->irq;
11571
275f165f 11572 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
11573 if (!bp->regview) {
11574 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11575 rc = -ENOMEM;
11576 goto err_out_release;
11577 }
11578
34f80b04
EG
11579 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11580 min_t(u64, BNX2X_DB_SIZE,
11581 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
11582 if (!bp->doorbells) {
11583 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11584 rc = -ENOMEM;
11585 goto err_out_unmap;
11586 }
11587
11588 bnx2x_set_power_state(bp, PCI_D0);
11589
34f80b04
EG
11590 /* clean indirect addresses */
11591 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11592 PCICFG_VENDOR_ID_OFFSET);
11593 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11594 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11595 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11596 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11597
34f80b04 11598 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11599
c64213cd 11600 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11601 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
11602 dev->features |= NETIF_F_SG;
11603 dev->features |= NETIF_F_HW_CSUM;
11604 if (bp->flags & USING_DAC_FLAG)
11605 dev->features |= NETIF_F_HIGHDMA;
5316bc0b
EG
11606 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11607 dev->features |= NETIF_F_TSO6;
34f80b04
EG
11608#ifdef BCM_VLAN
11609 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11610 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b
EG
11611
11612 dev->vlan_features |= NETIF_F_SG;
11613 dev->vlan_features |= NETIF_F_HW_CSUM;
11614 if (bp->flags & USING_DAC_FLAG)
11615 dev->vlan_features |= NETIF_F_HIGHDMA;
11616 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11617 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11618#endif
a2fbb9ea 11619
01cd4528
EG
11620 /* get_port_hwinfo() will set prtad and mmds properly */
11621 bp->mdio.prtad = MDIO_PRTAD_NONE;
11622 bp->mdio.mmds = 0;
11623 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11624 bp->mdio.dev = dev;
11625 bp->mdio.mdio_read = bnx2x_mdio_read;
11626 bp->mdio.mdio_write = bnx2x_mdio_write;
11627
a2fbb9ea
ET
11628 return 0;
11629
11630err_out_unmap:
11631 if (bp->regview) {
11632 iounmap(bp->regview);
11633 bp->regview = NULL;
11634 }
a2fbb9ea
ET
11635 if (bp->doorbells) {
11636 iounmap(bp->doorbells);
11637 bp->doorbells = NULL;
11638 }
11639
11640err_out_release:
34f80b04
EG
11641 if (atomic_read(&pdev->enable_cnt) == 1)
11642 pci_release_regions(pdev);
a2fbb9ea
ET
11643
11644err_out_disable:
11645 pci_disable_device(pdev);
11646 pci_set_drvdata(pdev, NULL);
11647
11648err_out:
11649 return rc;
11650}
11651
25047950
ET
11652static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11653{
11654 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11655
11656 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11657 return val;
11658}
11659
11660/* return value of 1=2.5GHz 2=5GHz */
11661static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11662{
11663 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11664
11665 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11666 return val;
11667}
94a78b79
VZ
11668static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11669{
11670 struct bnx2x_fw_file_hdr *fw_hdr;
11671 struct bnx2x_fw_file_section *sections;
11672 u16 *ops_offsets;
11673 u32 offset, len, num_ops;
11674 int i;
11675 const struct firmware *firmware = bp->firmware;
11676 const u8 * fw_ver;
11677
11678 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11679 return -EINVAL;
11680
11681 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11682 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11683
11684 /* Make sure none of the offsets and sizes make us read beyond
11685 * the end of the firmware data */
11686 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11687 offset = be32_to_cpu(sections[i].offset);
11688 len = be32_to_cpu(sections[i].len);
11689 if (offset + len > firmware->size) {
11690 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11691 return -EINVAL;
11692 }
11693 }
11694
11695 /* Likewise for the init_ops offsets */
11696 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11697 ops_offsets = (u16 *)(firmware->data + offset);
11698 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11699
11700 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11701 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11702 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11703 return -EINVAL;
11704 }
11705 }
11706
11707 /* Check FW version */
11708 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11709 fw_ver = firmware->data + offset;
11710 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11711 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11712 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11713 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11714 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11715 " Should be %d.%d.%d.%d\n",
11716 fw_ver[0], fw_ver[1], fw_ver[2],
11717 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11718 BCM_5710_FW_MINOR_VERSION,
11719 BCM_5710_FW_REVISION_VERSION,
11720 BCM_5710_FW_ENGINEERING_VERSION);
11721 return -EINVAL;
11722 }
11723
11724 return 0;
11725}
11726
11727static void inline be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11728{
11729 u32 i;
11730 const __be32 *source = (const __be32*)_source;
11731 u32 *target = (u32*)_target;
11732
11733 for (i = 0; i < n/4; i++)
11734 target[i] = be32_to_cpu(source[i]);
11735}
11736
11737/*
11738 Ops array is stored in the following format:
11739 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11740 */
11741static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11742{
11743 u32 i, j, tmp;
11744 const __be32 *source = (const __be32*)_source;
11745 struct raw_op *target = (struct raw_op*)_target;
11746
11747 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11748 tmp = be32_to_cpu(source[j]);
11749 target[i].op = (tmp >> 24) & 0xff;
11750 target[i].offset = tmp & 0xffffff;
11751 target[i].raw_data = be32_to_cpu(source[j+1]);
11752 }
11753}
11754static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11755{
11756 u32 i;
11757 u16 *target = (u16*)_target;
11758 const __be16 *source = (const __be16*)_source;
11759
11760 for (i = 0; i < n/2; i++)
11761 target[i] = be16_to_cpu(source[i]);
11762}
11763
11764#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11765 do { \
11766 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11767 bp->arr = kmalloc(len, GFP_KERNEL); \
11768 if (!bp->arr) { \
11769 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11770 goto lbl; \
11771 } \
11772 func(bp->firmware->data + \
11773 be32_to_cpu(fw_hdr->arr.offset), \
11774 (u8*)bp->arr, len); \
11775 } while (0)
11776
11777
11778static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11779{
11780 char fw_file_name[40] = {0};
11781 int rc, offset;
11782 struct bnx2x_fw_file_hdr *fw_hdr;
11783
11784 /* Create a FW file name */
11785 if (CHIP_IS_E1(bp))
11786 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11787 else
11788 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11789
11790 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11791 BCM_5710_FW_MAJOR_VERSION,
11792 BCM_5710_FW_MINOR_VERSION,
11793 BCM_5710_FW_REVISION_VERSION,
11794 BCM_5710_FW_ENGINEERING_VERSION);
11795
11796 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11797
11798 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11799 if (rc) {
11800 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11801 goto request_firmware_exit;
11802 }
11803
11804 rc = bnx2x_check_firmware(bp);
11805 if (rc) {
11806 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11807 goto request_firmware_exit;
11808 }
11809
11810 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11811
11812 /* Initialize the pointers to the init arrays */
11813 /* Blob */
11814 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11815
11816 /* Opcodes */
11817 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11818
11819 /* Offsets */
11820 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11821
11822 /* STORMs firmware */
11823 bp->tsem_int_table_data = bp->firmware->data +
11824 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11825 bp->tsem_pram_data = bp->firmware->data +
11826 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11827 bp->usem_int_table_data = bp->firmware->data +
11828 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11829 bp->usem_pram_data = bp->firmware->data +
11830 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11831 bp->xsem_int_table_data = bp->firmware->data +
11832 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11833 bp->xsem_pram_data = bp->firmware->data +
11834 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11835 bp->csem_int_table_data = bp->firmware->data +
11836 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11837 bp->csem_pram_data = bp->firmware->data +
11838 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11839
11840 return 0;
11841init_offsets_alloc_err:
11842 kfree(bp->init_ops);
11843init_ops_alloc_err:
11844 kfree(bp->init_data);
11845request_firmware_exit:
11846 release_firmware(bp->firmware);
11847
11848 return rc;
11849}
11850
11851
25047950 11852
a2fbb9ea
ET
11853static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11854 const struct pci_device_id *ent)
11855{
11856 static int version_printed;
11857 struct net_device *dev = NULL;
11858 struct bnx2x *bp;
25047950 11859 int rc;
a2fbb9ea
ET
11860
11861 if (version_printed++ == 0)
11862 printk(KERN_INFO "%s", version);
11863
11864 /* dev zeroed in init_etherdev */
555f6c78 11865 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
11866 if (!dev) {
11867 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11868 return -ENOMEM;
34f80b04 11869 }
a2fbb9ea 11870
a2fbb9ea
ET
11871 bp = netdev_priv(dev);
11872 bp->msglevel = debug;
11873
34f80b04 11874 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
11875 if (rc < 0) {
11876 free_netdev(dev);
11877 return rc;
11878 }
11879
a2fbb9ea
ET
11880 pci_set_drvdata(pdev, dev);
11881
34f80b04 11882 rc = bnx2x_init_bp(bp);
693fc0d1
EG
11883 if (rc)
11884 goto init_one_exit;
11885
94a78b79
VZ
11886 /* Set init arrays */
11887 rc = bnx2x_init_firmware(bp, &pdev->dev);
11888 if (rc) {
11889 printk(KERN_ERR PFX "Error loading firmware\n");
11890 goto init_one_exit;
11891 }
11892
693fc0d1 11893 rc = register_netdev(dev);
34f80b04 11894 if (rc) {
693fc0d1 11895 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
11896 goto init_one_exit;
11897 }
11898
25047950 11899 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11900 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11901 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
11902 bnx2x_get_pcie_width(bp),
11903 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11904 dev->base_addr, bp->pdev->irq);
e174961c 11905 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 11906
a2fbb9ea 11907 return 0;
34f80b04
EG
11908
11909init_one_exit:
11910 if (bp->regview)
11911 iounmap(bp->regview);
11912
11913 if (bp->doorbells)
11914 iounmap(bp->doorbells);
11915
11916 free_netdev(dev);
11917
11918 if (atomic_read(&pdev->enable_cnt) == 1)
11919 pci_release_regions(pdev);
11920
11921 pci_disable_device(pdev);
11922 pci_set_drvdata(pdev, NULL);
11923
11924 return rc;
a2fbb9ea
ET
11925}
11926
11927static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11928{
11929 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11930 struct bnx2x *bp;
11931
11932 if (!dev) {
228241eb
ET
11933 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11934 return;
11935 }
228241eb 11936 bp = netdev_priv(dev);
a2fbb9ea 11937
a2fbb9ea
ET
11938 unregister_netdev(dev);
11939
94a78b79
VZ
11940 kfree(bp->init_ops_offsets);
11941 kfree(bp->init_ops);
11942 kfree(bp->init_data);
11943 release_firmware(bp->firmware);
11944
a2fbb9ea
ET
11945 if (bp->regview)
11946 iounmap(bp->regview);
11947
11948 if (bp->doorbells)
11949 iounmap(bp->doorbells);
11950
11951 free_netdev(dev);
34f80b04
EG
11952
11953 if (atomic_read(&pdev->enable_cnt) == 1)
11954 pci_release_regions(pdev);
11955
a2fbb9ea
ET
11956 pci_disable_device(pdev);
11957 pci_set_drvdata(pdev, NULL);
11958}
11959
11960static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11961{
11962 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11963 struct bnx2x *bp;
11964
34f80b04
EG
11965 if (!dev) {
11966 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11967 return -ENODEV;
11968 }
11969 bp = netdev_priv(dev);
a2fbb9ea 11970
34f80b04 11971 rtnl_lock();
a2fbb9ea 11972
34f80b04 11973 pci_save_state(pdev);
228241eb 11974
34f80b04
EG
11975 if (!netif_running(dev)) {
11976 rtnl_unlock();
11977 return 0;
11978 }
a2fbb9ea
ET
11979
11980 netif_device_detach(dev);
a2fbb9ea 11981
da5a662a 11982 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11983
a2fbb9ea 11984 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11985
34f80b04
EG
11986 rtnl_unlock();
11987
a2fbb9ea
ET
11988 return 0;
11989}
11990
11991static int bnx2x_resume(struct pci_dev *pdev)
11992{
11993 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11994 struct bnx2x *bp;
a2fbb9ea
ET
11995 int rc;
11996
228241eb
ET
11997 if (!dev) {
11998 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11999 return -ENODEV;
12000 }
228241eb 12001 bp = netdev_priv(dev);
a2fbb9ea 12002
34f80b04
EG
12003 rtnl_lock();
12004
228241eb 12005 pci_restore_state(pdev);
34f80b04
EG
12006
12007 if (!netif_running(dev)) {
12008 rtnl_unlock();
12009 return 0;
12010 }
12011
a2fbb9ea
ET
12012 bnx2x_set_power_state(bp, PCI_D0);
12013 netif_device_attach(dev);
12014
da5a662a 12015 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 12016
34f80b04
EG
12017 rtnl_unlock();
12018
12019 return rc;
a2fbb9ea
ET
12020}
12021
f8ef6e44
YG
12022static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12023{
12024 int i;
12025
12026 bp->state = BNX2X_STATE_ERROR;
12027
12028 bp->rx_mode = BNX2X_RX_MODE_NONE;
12029
12030 bnx2x_netif_stop(bp, 0);
12031
12032 del_timer_sync(&bp->timer);
12033 bp->stats_state = STATS_STATE_DISABLED;
12034 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12035
12036 /* Release IRQs */
12037 bnx2x_free_irq(bp);
12038
12039 if (CHIP_IS_E1(bp)) {
12040 struct mac_configuration_cmd *config =
12041 bnx2x_sp(bp, mcast_config);
12042
8d9c5f34 12043 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
12044 CAM_INVALIDATE(config->config_table[i]);
12045 }
12046
12047 /* Free SKBs, SGEs, TPA pool and driver internals */
12048 bnx2x_free_skbs(bp);
555f6c78 12049 for_each_rx_queue(bp, i)
f8ef6e44 12050 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 12051 for_each_rx_queue(bp, i)
7cde1c8b 12052 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
12053 bnx2x_free_mem(bp);
12054
12055 bp->state = BNX2X_STATE_CLOSED;
12056
12057 netif_carrier_off(bp->dev);
12058
12059 return 0;
12060}
12061
12062static void bnx2x_eeh_recover(struct bnx2x *bp)
12063{
12064 u32 val;
12065
12066 mutex_init(&bp->port.phy_mutex);
12067
12068 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12069 bp->link_params.shmem_base = bp->common.shmem_base;
12070 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12071
12072 if (!bp->common.shmem_base ||
12073 (bp->common.shmem_base < 0xA0000) ||
12074 (bp->common.shmem_base >= 0xC0000)) {
12075 BNX2X_DEV_INFO("MCP not active\n");
12076 bp->flags |= NO_MCP_FLAG;
12077 return;
12078 }
12079
12080 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12081 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12082 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12083 BNX2X_ERR("BAD MCP validity signature\n");
12084
12085 if (!BP_NOMCP(bp)) {
12086 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12087 & DRV_MSG_SEQ_NUMBER_MASK);
12088 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12089 }
12090}
12091
493adb1f
WX
12092/**
12093 * bnx2x_io_error_detected - called when PCI error is detected
12094 * @pdev: Pointer to PCI device
12095 * @state: The current pci connection state
12096 *
12097 * This function is called after a PCI bus error affecting
12098 * this device has been detected.
12099 */
12100static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12101 pci_channel_state_t state)
12102{
12103 struct net_device *dev = pci_get_drvdata(pdev);
12104 struct bnx2x *bp = netdev_priv(dev);
12105
12106 rtnl_lock();
12107
12108 netif_device_detach(dev);
12109
07ce50e4
DN
12110 if (state == pci_channel_io_perm_failure) {
12111 rtnl_unlock();
12112 return PCI_ERS_RESULT_DISCONNECT;
12113 }
12114
493adb1f 12115 if (netif_running(dev))
f8ef6e44 12116 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
12117
12118 pci_disable_device(pdev);
12119
12120 rtnl_unlock();
12121
12122 /* Request a slot reset */
12123 return PCI_ERS_RESULT_NEED_RESET;
12124}
12125
12126/**
12127 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12128 * @pdev: Pointer to PCI device
12129 *
12130 * Restart the card from scratch, as if from a cold-boot.
12131 */
12132static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12133{
12134 struct net_device *dev = pci_get_drvdata(pdev);
12135 struct bnx2x *bp = netdev_priv(dev);
12136
12137 rtnl_lock();
12138
12139 if (pci_enable_device(pdev)) {
12140 dev_err(&pdev->dev,
12141 "Cannot re-enable PCI device after reset\n");
12142 rtnl_unlock();
12143 return PCI_ERS_RESULT_DISCONNECT;
12144 }
12145
12146 pci_set_master(pdev);
12147 pci_restore_state(pdev);
12148
12149 if (netif_running(dev))
12150 bnx2x_set_power_state(bp, PCI_D0);
12151
12152 rtnl_unlock();
12153
12154 return PCI_ERS_RESULT_RECOVERED;
12155}
12156
12157/**
12158 * bnx2x_io_resume - called when traffic can start flowing again
12159 * @pdev: Pointer to PCI device
12160 *
12161 * This callback is called when the error recovery driver tells us that
12162 * its OK to resume normal operation.
12163 */
12164static void bnx2x_io_resume(struct pci_dev *pdev)
12165{
12166 struct net_device *dev = pci_get_drvdata(pdev);
12167 struct bnx2x *bp = netdev_priv(dev);
12168
12169 rtnl_lock();
12170
f8ef6e44
YG
12171 bnx2x_eeh_recover(bp);
12172
493adb1f 12173 if (netif_running(dev))
f8ef6e44 12174 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
12175
12176 netif_device_attach(dev);
12177
12178 rtnl_unlock();
12179}
12180
12181static struct pci_error_handlers bnx2x_err_handler = {
12182 .error_detected = bnx2x_io_error_detected,
356e2385
EG
12183 .slot_reset = bnx2x_io_slot_reset,
12184 .resume = bnx2x_io_resume,
493adb1f
WX
12185};
12186
a2fbb9ea 12187static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
12188 .name = DRV_MODULE_NAME,
12189 .id_table = bnx2x_pci_tbl,
12190 .probe = bnx2x_init_one,
12191 .remove = __devexit_p(bnx2x_remove_one),
12192 .suspend = bnx2x_suspend,
12193 .resume = bnx2x_resume,
12194 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
12195};
12196
12197static int __init bnx2x_init(void)
12198{
dd21ca6d
SG
12199 int ret;
12200
1cf167f2
EG
12201 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12202 if (bnx2x_wq == NULL) {
12203 printk(KERN_ERR PFX "Cannot create workqueue\n");
12204 return -ENOMEM;
12205 }
12206
dd21ca6d
SG
12207 ret = pci_register_driver(&bnx2x_pci_driver);
12208 if (ret) {
12209 printk(KERN_ERR PFX "Cannot register driver\n");
12210 destroy_workqueue(bnx2x_wq);
12211 }
12212 return ret;
a2fbb9ea
ET
12213}
12214
12215static void __exit bnx2x_cleanup(void)
12216{
12217 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
12218
12219 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
12220}
12221
12222module_init(bnx2x_init);
12223module_exit(bnx2x_cleanup);
12224
94a78b79 12225
This page took 2.421154 seconds and 5 git commands to generate.