bnx2x: Adding Likely directive
drivers/net/bnx2x_main.c
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
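
/* The two helpers above implement indirect GRC access: the GRC window in
 * PCI config space (PCICFG_GRC_ADDRESS) is pointed at the target register,
 * data moves through PCICFG_GRC_DATA, and the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET so a later config-space access cannot hit a
 * stale GRC address.
 */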

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
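
/* DMAE completion handshake (used above and in bnx2x_read_dmae() below):
 * the engine DMA-writes DMAE_COMP_VAL into the wb_comp word of the
 * slowpath buffer when the transfer finishes, so the driver zeroes
 * wb_comp, posts the command and polls that word (bounded by cnt) instead
 * of waiting for an interrupt.
 */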

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
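
/* Each of the four storm RISC processors (X/T/C/U) keeps its own assert
 * list in internal memory; the scans above are identical per storm, stop
 * at the first COMMON_ASM_INVALID_ASSERT_OPCODE entry and return the total
 * number of asserts found.
 */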

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
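
/* The MCP scratchpad apparently holds the firmware log as a circular text
 * buffer: "mark" (read from offset 0xf104 and rounded up to a dword) is
 * the current write position, so the two loops print from mark to the end
 * and then wrap from the start back up to mark, 32 bytes at a time as
 * NUL-terminated strings.
 */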

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
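
/* On E1H the leading/trailing edge registers select which attention bits
 * may fire: a multi-function device enables the common bits (0xee0f) plus
 * its own VN bit, a PMF additionally NIG and GPIO3 (0x1100), while a
 * single-function device simply enables everything (0xffff).
 */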

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
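
/* Teardown ordering: intr_sem is bumped first so any ISR that still runs
 * bails out early, the HC can then be told to stop generating interrupts,
 * synchronize_irq() waits out handlers already in flight, and finally the
 * slowpath work is cancelled and the workqueue flushed.
 */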

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
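
/* One 32-bit IGU write carries the whole ack: the status block and storm
 * being acked, the last status-block index the driver has seen, whether to
 * update that index, and the next interrupt mode (IGU_INT_ENABLE,
 * IGU_INT_DISABLE or IGU_INT_NOP).
 */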

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
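
/* The return value is a bitmask: bit 0 - the CSTORM (Tx) index advanced,
 * bit 1 - the USTORM (Rx) index advanced, 0 - nothing new in the status
 * block.
 */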

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
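
/* A transmitted packet occupies a chain of BDs: the start BD (unmapped via
 * pci_unmap_single()), a parse BD and an optional TSO split-header BD
 * (neither carries a mapping), then one data BD per fragment (unmapped via
 * pci_unmap_page()); nbd taken from the start BD drives the walk.
 */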

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
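
/* The signed 16-bit subtraction keeps the math correct across wraparound
 * of the free-running u16 counters: e.g. prod = 100 after a wrap and
 * cons = 65500 give SUB_S16(100, 65500) = 136 BDs in use; NUM_TX_RINGS is
 * added because the "next page" BD of each ring page can never hold data.
 */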

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
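
/* The wake threshold of MAX_SKB_FRAGS + 3 leaves room for a worst-case skb
 * (start BD, parse BD, split-header BD plus one BD per fragment), so a
 * queue woken here cannot immediately stall again on its first transmit.
 */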


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
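
/* Ramrods are slowpath commands posted via the SPQ whose completions come
 * back as slowpath CQEs on the fastpath completion ring; that is why this
 * handler dispatches on (command | state) and credits spq_left for every
 * completion consumed.
 */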

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
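
/* Each SGE entry maps a compound page of PAGES_PER_SGE pages; the software
 * ring (rx_page_ring) shadows the hardware ring (rx_sge_ring) so the page
 * can be found and unmapped again once the FW reports it consumed.
 */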

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
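
/* sge_mask is a bitmap over the SGE ring packed 64 entries per u64: a set
 * bit marks an SGE still owned by the driver, a cleared bit one consumed
 * by the FW.  The producer in bnx2x_update_sge_prod() only advances over
 * whole mask elements that have dropped to zero, then re-arms them to
 * all-ones.
 */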

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
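
/* TPA (LRO) bookkeeping: each aggregation queue owns a "bin" in tpa_pool.
 * tpa_start parks the partially filled skb there while the FW keeps
 * aggregating, and bnx2x_tpa_stop() below later completes it with the SGE
 * pages and hands it to the stack.
 */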

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
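
/* The USTORM expects all three Rx producers (BD, CQE, SGE) laid out as one
 * structure in its internal memory, hence the dword-by-dword copy into
 * BAR_USTRORM_INTMEM rather than three unrelated register writes.
 */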

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
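
/* Matching this patch's theme, the Rx loop above marks its common cases
 * (good packet, successful allocation, checksum OK) with likely() and the
 * rare ones (slowpath CQEs, error flags, allocation failures) with
 * unlikely(), steering the compiler's block layout toward the hot path.
 */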

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
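
/* Per-vector split: an Rx vector only disables its status-block interrupt
 * and schedules NAPI (which re-enables it when polling is done), while a
 * Tx vector completes its work right here in hard-IRQ context and re-arms
 * the interrupt itself with the two acks above.
 */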

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1806
c18487ee 1807/* end of fast path */
a2fbb9ea 1808
bb2a0f7a 1809static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1810
c18487ee
YR
1811/* Link */
1812
1813/*
1814 * General service functions
1815 */
a2fbb9ea 1816
4a37fb66 1817static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1818{
1819 u32 lock_status;
1820 u32 resource_bit = (1 << resource);
4a37fb66
YG
1821 int func = BP_FUNC(bp);
1822 u32 hw_lock_control_reg;
c18487ee 1823 int cnt;
a2fbb9ea 1824
c18487ee
YR
1825 /* Validating that the resource is within range */
1826 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1827 DP(NETIF_MSG_HW,
1828 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1829 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1830 return -EINVAL;
1831 }
a2fbb9ea 1832
4a37fb66
YG
1833 if (func <= 5) {
1834 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1835 } else {
1836 hw_lock_control_reg =
1837 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1838 }
1839
c18487ee 1840 /* Validating that the resource is not already taken */
4a37fb66 1841 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1842 if (lock_status & resource_bit) {
1843 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1844 lock_status, resource_bit);
1845 return -EEXIST;
1846 }
a2fbb9ea 1847
46230476
EG
1848 /* Try for 5 seconds, every 5 ms */
1849 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1850 /* Try to acquire the lock */
4a37fb66
YG
1851 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1852 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1853 if (lock_status & resource_bit)
1854 return 0;
a2fbb9ea 1855
c18487ee 1856 msleep(5);
a2fbb9ea 1857 }
c18487ee
YR
1858 DP(NETIF_MSG_HW, "Timeout\n");
1859 return -EAGAIN;
1860}
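/* Illustrative usage sketch (not from the original source): the lock
 * must be paired with bnx2x_release_hw_lock() on the same resource,
 * as the GPIO/SPIO helpers below do:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... touch the shared GPIO registers ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */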
a2fbb9ea 1861
4a37fb66 1862static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1863{
1864 u32 lock_status;
1865 u32 resource_bit = (1 << resource);
4a37fb66
YG
1866 int func = BP_FUNC(bp);
1867 u32 hw_lock_control_reg;
a2fbb9ea 1868
c18487ee
YR
1869 /* Validating that the resource is within range */
1870 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1871 DP(NETIF_MSG_HW,
1872 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1873 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1874 return -EINVAL;
1875 }
1876
4a37fb66
YG
1877 if (func <= 5) {
1878 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1879 } else {
1880 hw_lock_control_reg =
1881 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1882 }
1883
c18487ee 1884 /* Validating that the resource is currently taken */
4a37fb66 1885 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1886 if (!(lock_status & resource_bit)) {
1887 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1888 lock_status, resource_bit);
1889 return -EFAULT;
a2fbb9ea
ET
1890 }
1891
4a37fb66 1892 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1893 return 0;
1894}
1895
1896/* HW Lock for shared dual port PHYs */
4a37fb66 1897static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1898{
34f80b04 1899 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1900
46c6a674
EG
1901 if (bp->port.need_hw_lock)
1902 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1903}
a2fbb9ea 1904
4a37fb66 1905static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1906{
46c6a674
EG
1907 if (bp->port.need_hw_lock)
1908 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1909
34f80b04 1910 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1911}
a2fbb9ea 1912
4acac6a5
EG
1913int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1914{
1915 /* The GPIO should be swapped if swap register is set and active */
1916 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1917 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1918 int gpio_shift = gpio_num +
1919 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1920 u32 gpio_mask = (1 << gpio_shift);
1921 u32 gpio_reg;
1922 int value;
1923
1924 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1925 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1926 return -EINVAL;
1927 }
1928
1929 /* read GPIO value */
1930 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1931
1932 /* get the requested pin value */
1933 if ((gpio_reg & gpio_mask) == gpio_mask)
1934 value = 1;
1935 else
1936 value = 0;
1937
1938 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1939
1940 return value;
1941}
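/* Worked example (illustrative): with NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE both set, port 0 resolves to gpio_port 1, so
 * gpio_shift = gpio_num + MISC_REGISTERS_GPIO_PORT_SHIFT and the pin
 * is sampled from the other port's bank; with no swap, gpio_shift is
 * simply gpio_num.
 */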
1942
17de50b7 1943int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1944{
1945 /* The GPIO should be swapped if swap register is set and active */
1946 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1947 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1948 int gpio_shift = gpio_num +
1949 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1950 u32 gpio_mask = (1 << gpio_shift);
1951 u32 gpio_reg;
a2fbb9ea 1952
c18487ee
YR
1953 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1954 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1955 return -EINVAL;
1956 }
a2fbb9ea 1957
4a37fb66 1958 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1959 /* read GPIO and mask except the float bits */
1960 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1961
c18487ee
YR
1962 switch (mode) {
1963 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1964 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1965 gpio_num, gpio_shift);
1966 /* clear FLOAT and set CLR */
1967 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1969 break;
a2fbb9ea 1970
c18487ee
YR
1971 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1972 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1973 gpio_num, gpio_shift);
1974 /* clear FLOAT and set SET */
1975 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1977 break;
a2fbb9ea 1978
17de50b7 1979 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1980 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1981 gpio_num, gpio_shift);
1982 /* set FLOAT */
1983 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1984 break;
a2fbb9ea 1985
c18487ee
YR
1986 default:
1987 break;
a2fbb9ea
ET
1988 }
1989
c18487ee 1990 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1991 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1992
c18487ee 1993 return 0;
a2fbb9ea
ET
1994}
1995
4acac6a5
EG
1996int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1997{
1998 /* The GPIO should be swapped if swap register is set and active */
1999 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2000 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2001 int gpio_shift = gpio_num +
2002 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2003 u32 gpio_mask = (1 << gpio_shift);
2004 u32 gpio_reg;
2005
2006 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2007 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2008 return -EINVAL;
2009 }
2010
2011 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2012 /* read GPIO int */
2013 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2014
2015 switch (mode) {
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2017 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2018 "output low\n", gpio_num, gpio_shift);
2019 /* clear SET and set CLR */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2022 break;
2023
2024 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2025 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2026 "output high\n", gpio_num, gpio_shift);
2027 /* clear CLR and set SET */
2028 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2029 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2030 break;
2031
2032 default:
2033 break;
2034 }
2035
2036 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2037 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2038
2039 return 0;
2040}
2041
c18487ee 2042static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2043{
c18487ee
YR
2044 u32 spio_mask = (1 << spio_num);
2045 u32 spio_reg;
a2fbb9ea 2046
c18487ee
YR
2047 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2048 (spio_num > MISC_REGISTERS_SPIO_7)) {
2049 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2050 return -EINVAL;
a2fbb9ea
ET
2051 }
2052
4a37fb66 2053 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2054 /* read SPIO and mask except the float bits */
2055 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2056
c18487ee 2057 switch (mode) {
6378c025 2058 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2059 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2060 /* clear FLOAT and set CLR */
2061 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2062 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2063 break;
a2fbb9ea 2064
6378c025 2065 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2066 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2067 /* clear FLOAT and set SET */
2068 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2069 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2070 break;
a2fbb9ea 2071
c18487ee
YR
2072 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2073 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2074 /* set FLOAT */
2075 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2076 break;
a2fbb9ea 2077
c18487ee
YR
2078 default:
2079 break;
a2fbb9ea
ET
2080 }
2081
c18487ee 2082 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2083 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2084
a2fbb9ea
ET
2085 return 0;
2086}
2087
c18487ee 2088static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2089{
ad33ea3a
EG
2090 switch (bp->link_vars.ieee_fc &
2091 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2092 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2093 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2094 ADVERTISED_Pause);
2095 break;
356e2385 2096
c18487ee 2097 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2098 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2099 ADVERTISED_Pause);
2100 break;
356e2385 2101
c18487ee 2102 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2103 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2104 break;
356e2385 2105
c18487ee 2106 default:
34f80b04 2107 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2108 ADVERTISED_Pause);
2109 break;
2110 }
2111}
f1410647 2112
c18487ee
YR
2113static void bnx2x_link_report(struct bnx2x *bp)
2114{
2691d51d
EG
2115 if (bp->state == BNX2X_STATE_DISABLED) {
2116 netif_carrier_off(bp->dev);
2117 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2118 return;
2119 }
2120
c18487ee
YR
2121 if (bp->link_vars.link_up) {
2122 if (bp->state == BNX2X_STATE_OPEN)
2123 netif_carrier_on(bp->dev);
2124 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2125
c18487ee 2126 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2127
c18487ee
YR
2128 if (bp->link_vars.duplex == DUPLEX_FULL)
2129 printk("full duplex");
2130 else
2131 printk("half duplex");
f1410647 2132
c0700f90
DM
2133 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2134 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2135 printk(", receive ");
356e2385
EG
2136 if (bp->link_vars.flow_ctrl &
2137 BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2138 printk("& transmit ");
2139 } else {
2140 printk(", transmit ");
2141 }
2142 printk("flow control ON");
2143 }
2144 printk("\n");
f1410647 2145
c18487ee
YR
2146 } else { /* link_down */
2147 netif_carrier_off(bp->dev);
2148 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2149 }
c18487ee
YR
2150}
2151
b5bf9068 2152static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2153{
19680c48
EG
2154 if (!BP_NOMCP(bp)) {
2155 u8 rc;
a2fbb9ea 2156
19680c48 2157 /* Initialize link parameters structure variables */
8c99e7b0
YR
2158 /* It is recommended to turn off RX FC for jumbo frames
2159 for better performance */
0c593270 2160 if (bp->dev->mtu > 5000)
c0700f90 2161 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2162 else
c0700f90 2163 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2164
4a37fb66 2165 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2166
2167 if (load_mode == LOAD_DIAG)
2168 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2169
19680c48 2170 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2171
4a37fb66 2172 bnx2x_release_phy_lock(bp);
a2fbb9ea 2173
3c96c68b
EG
2174 bnx2x_calc_fc_adv(bp);
2175
b5bf9068
EG
2176 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2177 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2178 bnx2x_link_report(bp);
b5bf9068 2179 }
34f80b04 2180
19680c48
EG
2181 return rc;
2182 }
f5372251 2183 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2184 return -EINVAL;
a2fbb9ea
ET
2185}
2186
c18487ee 2187static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2188{
19680c48 2189 if (!BP_NOMCP(bp)) {
4a37fb66 2190 bnx2x_acquire_phy_lock(bp);
19680c48 2191 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2192 bnx2x_release_phy_lock(bp);
a2fbb9ea 2193
19680c48
EG
2194 bnx2x_calc_fc_adv(bp);
2195 } else
f5372251 2196 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2197}
a2fbb9ea 2198
c18487ee
YR
2199static void bnx2x__link_reset(struct bnx2x *bp)
2200{
19680c48 2201 if (!BP_NOMCP(bp)) {
4a37fb66 2202 bnx2x_acquire_phy_lock(bp);
589abe3a 2203 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2204 bnx2x_release_phy_lock(bp);
19680c48 2205 } else
f5372251 2206 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2207}
a2fbb9ea 2208
c18487ee
YR
2209static u8 bnx2x_link_test(struct bnx2x *bp)
2210{
2211 u8 rc;
a2fbb9ea 2212
4a37fb66 2213 bnx2x_acquire_phy_lock(bp);
c18487ee 2214 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2215 bnx2x_release_phy_lock(bp);
a2fbb9ea 2216
c18487ee
YR
2217 return rc;
2218}
a2fbb9ea 2219
8a1c38d1 2220static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2221{
8a1c38d1
EG
2222 u32 r_param = bp->link_vars.line_speed / 8;
2223 u32 fair_periodic_timeout_usec;
2224 u32 t_fair;
34f80b04 2225
8a1c38d1
EG
2226 memset(&(bp->cmng.rs_vars), 0,
2227 sizeof(struct rate_shaping_vars_per_port));
2228 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2229
8a1c38d1
EG
2230 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2231 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2232
8a1c38d1
EG
2233 /* this is the threshold below which no timer arming will occur;
2234 the 1.25 coefficient makes the threshold a little bigger
2235 than the real time, to compensate for timer inaccuracy */
2236 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2237 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2238
8a1c38d1
EG
2239 /* resolution of fairness timer */
2240 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2241 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2242 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2243
8a1c38d1
EG
2244 /* this is the threshold below which we won't arm the timer anymore */
2245 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2246
8a1c38d1
EG
2247 /* we multiply by 1e3/8 to get bytes/msec.
2248 We don't want the credits to exceed a credit of
2249 t_fair*FAIR_MEM (the algorithm resolution) */
2250 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2251 /* since each tick is 4 usec */
2252 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2253}
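/* Worked example (illustrative, assuming T_FAIR_COEF == 10000000):
 * at 10G, line_speed = 10000 Mbps so r_param = 1250 bytes/usec and
 * t_fair = 10000000 / 10000 = 1000 usec; at 1G, t_fair = 10000 usec,
 * matching the 10G/1G comment above.
 */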
2254
2691d51d
EG
2255/* Calculates the sum of vn_min_rates.
2256 It's needed for further normalizing of the min_rates.
2257 Returns:
2258 sum of vn_min_rates.
2259 or
2260 0 - if all the min_rates are 0.
2261 In the latter case the fairness algorithm should be deactivated.
2262 If not all min_rates are zero then those that are zeroes will be set to 1.
2263 */
2264static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2265{
2266 int all_zero = 1;
2267 int port = BP_PORT(bp);
2268 int vn;
2269
2270 bp->vn_weight_sum = 0;
2271 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2272 int func = 2*vn + port;
2273 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2274 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2275 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2276
2277 /* Skip hidden vns */
2278 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2279 continue;
2280
2281 /* If min rate is zero - set it to 1 */
2282 if (!vn_min_rate)
2283 vn_min_rate = DEF_MIN_RATE;
2284 else
2285 all_zero = 0;
2286
2287 bp->vn_weight_sum += vn_min_rate;
2288 }
2289
2290 /* ... only if all min rates are zeros - disable fairness */
2291 if (all_zero)
2292 bp->vn_weight_sum = 0;
2293}
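/* Worked example (hypothetical values): with raw min-BW fields
 * {0, 25, 0, 75}, the scaled rates are {0, 2500, 0, 7500}; the two
 * zero entries are bumped to DEF_MIN_RATE, all_zero stays 0 and
 * vn_weight_sum = 2500 + 7500 + 2*DEF_MIN_RATE. Only when every
 * field is zero does vn_weight_sum end up 0 and fairness get
 * disabled.
 */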
2294
8a1c38d1 2295static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2296{
2297 struct rate_shaping_vars_per_vn m_rs_vn;
2298 struct fairness_vars_per_vn m_fair_vn;
2299 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2300 u16 vn_min_rate, vn_max_rate;
2301 int i;
2302
2303 /* If function is hidden - set min and max to zeroes */
2304 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2305 vn_min_rate = 0;
2306 vn_max_rate = 0;
2307
2308 } else {
2309 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2310 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2311 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2312 if current min rate is zero - set it to 1.
33471629 2313 This is a requirement of the algorithm. */
8a1c38d1 2314 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2315 vn_min_rate = DEF_MIN_RATE;
2316 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2317 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2318 }
2319
8a1c38d1
EG
2320 DP(NETIF_MSG_IFUP,
2321 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2322 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2323
2324 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2325 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2326
2327 /* global vn counter - maximal Mbps for this vn */
2328 m_rs_vn.vn_counter.rate = vn_max_rate;
2329
2330 /* quota - number of bytes transmitted in this period */
2331 m_rs_vn.vn_counter.quota =
2332 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2333
8a1c38d1 2334 if (bp->vn_weight_sum) {
34f80b04
EG
2335 /* credit for each period of the fairness algorithm:
2336 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2337 vn_weight_sum should not be larger than 10000, thus
2338 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2339 than zero */
34f80b04 2340 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2341 max((u32)(vn_min_rate * (T_FAIR_COEF /
2342 (8 * bp->vn_weight_sum))),
2343 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2344 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2345 m_fair_vn.vn_credit_delta);
2346 }
2347
34f80b04
EG
2348 /* Store it to internal memory */
2349 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2350 REG_WR(bp, BAR_XSTRORM_INTMEM +
2351 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2352 ((u32 *)(&m_rs_vn))[i]);
2353
2354 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2355 REG_WR(bp, BAR_XSTRORM_INTMEM +
2356 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2357 ((u32 *)(&m_fair_vn))[i]);
2358}
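/* Worked example (illustrative, assuming RS_PERIODIC_TIMEOUT_USEC == 100):
 * a vn with vn_max_rate = 10000 Mbps gets
 * quota = (10000 * 100) / 8 = 125000 bytes per rate-shaping period.
 */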
2359
8a1c38d1 2360
c18487ee
YR
2361/* This function is called upon link interrupt */
2362static void bnx2x_link_attn(struct bnx2x *bp)
2363{
bb2a0f7a
YG
2364 /* Make sure that we are synced with the current statistics */
2365 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2366
c18487ee 2367 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2368
bb2a0f7a
YG
2369 if (bp->link_vars.link_up) {
2370
1c06328c
EG
2371 /* dropless flow control */
2372 if (CHIP_IS_E1H(bp)) {
2373 int port = BP_PORT(bp);
2374 u32 pause_enabled = 0;
2375
2376 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2377 pause_enabled = 1;
2378
2379 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2380 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2381 pause_enabled);
2382 }
2383
bb2a0f7a
YG
2384 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2385 struct host_port_stats *pstats;
2386
2387 pstats = bnx2x_sp(bp, port_stats);
2388 /* reset old bmac stats */
2389 memset(&(pstats->mac_stx[0]), 0,
2390 sizeof(struct mac_stx));
2391 }
2392 if ((bp->state == BNX2X_STATE_OPEN) ||
2393 (bp->state == BNX2X_STATE_DISABLED))
2394 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2395 }
2396
c18487ee
YR
2397 /* indicate link status */
2398 bnx2x_link_report(bp);
34f80b04
EG
2399
2400 if (IS_E1HMF(bp)) {
8a1c38d1 2401 int port = BP_PORT(bp);
34f80b04 2402 int func;
8a1c38d1 2403 int vn;
34f80b04
EG
2404
2405 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2406 if (vn == BP_E1HVN(bp))
2407 continue;
2408
8a1c38d1 2409 func = ((vn << 1) | port);
34f80b04
EG
2410
2411 /* Set the attention towards other drivers
2412 on the same port */
2413 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2414 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2415 }
34f80b04 2416
8a1c38d1
EG
2417 if (bp->link_vars.link_up) {
2418 int i;
2419
2420 /* Init rate shaping and fairness contexts */
2421 bnx2x_init_port_minmax(bp);
34f80b04 2422
34f80b04 2423 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2424 bnx2x_init_vn_minmax(bp, 2*vn + port);
2425
2426 /* Store it to internal memory */
2427 for (i = 0;
2428 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2429 REG_WR(bp, BAR_XSTRORM_INTMEM +
2430 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2431 ((u32 *)(&bp->cmng))[i]);
2432 }
34f80b04 2433 }
c18487ee 2434}
a2fbb9ea 2435
c18487ee
YR
2436static void bnx2x__link_status_update(struct bnx2x *bp)
2437{
2691d51d
EG
2438 int func = BP_FUNC(bp);
2439
c18487ee
YR
2440 if (bp->state != BNX2X_STATE_OPEN)
2441 return;
a2fbb9ea 2442
c18487ee 2443 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2444
bb2a0f7a
YG
2445 if (bp->link_vars.link_up)
2446 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2447 else
2448 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2449
2691d51d
EG
2450 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2451 bnx2x_calc_vn_weight_sum(bp);
2452
c18487ee
YR
2453 /* indicate link status */
2454 bnx2x_link_report(bp);
a2fbb9ea 2455}
a2fbb9ea 2456
34f80b04
EG
2457static void bnx2x_pmf_update(struct bnx2x *bp)
2458{
2459 int port = BP_PORT(bp);
2460 u32 val;
2461
2462 bp->port.pmf = 1;
2463 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2464
2465 /* enable nig attention */
2466 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2467 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2468 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2469
2470 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2471}
2472
c18487ee 2473/* end of Link */
a2fbb9ea
ET
2474
2475/* slow path */
2476
2477/*
2478 * General service functions
2479 */
2480
2691d51d
EG
2481/* send the MCP a request, block until there is a reply */
2482u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2483{
2484 int func = BP_FUNC(bp);
2485 u32 seq = ++bp->fw_seq;
2486 u32 rc = 0;
2487 u32 cnt = 1;
2488 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2489
2490 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2491 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2492
2493 do {
2494 /* let the FW do its magic ... */
2495 msleep(delay);
2496
2497 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2498
2499 /* Give the FW up to 2 seconds (200*10ms) */
2500 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2501
2502 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2503 cnt*delay, rc, seq);
2504
2505 /* is this a reply to our command? */
2506 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2507 rc &= FW_MSG_CODE_MASK;
2508 else {
2509 /* FW BUG! */
2510 BNX2X_ERR("FW failed to respond!\n");
2511 bnx2x_fw_dump(bp);
2512 rc = 0;
2513 }
2514
2515 return rc;
2516}
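/* Illustrative usage sketch (not from the original source): callers
 * compare the masked reply against the expected FW_MSG_CODE_* value;
 * a return of 0 means the MCP failed to answer in time, e.g.
 *
 *	if (bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK) == 0)
 *		... treat as an MCP failure ...
 */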
2517
2518static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2519static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2520static void bnx2x_set_rx_mode(struct net_device *dev);
2521
2522static void bnx2x_e1h_disable(struct bnx2x *bp)
2523{
2524 int port = BP_PORT(bp);
2525 int i;
2526
2527 bp->rx_mode = BNX2X_RX_MODE_NONE;
2528 bnx2x_set_storm_rx_mode(bp);
2529
2530 netif_tx_disable(bp->dev);
2531 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2532
2533 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2534
2535 bnx2x_set_mac_addr_e1h(bp, 0);
2536
2537 for (i = 0; i < MC_HASH_SIZE; i++)
2538 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2539
2540 netif_carrier_off(bp->dev);
2541}
2542
2543static void bnx2x_e1h_enable(struct bnx2x *bp)
2544{
2545 int port = BP_PORT(bp);
2546
2547 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2548
2549 bnx2x_set_mac_addr_e1h(bp, 1);
2550
2551 /* Tx queue should be only reenabled */
2552 netif_tx_wake_all_queues(bp->dev);
2553
2554 /* Initialize the receive filter. */
2555 bnx2x_set_rx_mode(bp->dev);
2556}
2557
2558static void bnx2x_update_min_max(struct bnx2x *bp)
2559{
2560 int port = BP_PORT(bp);
2561 int vn, i;
2562
2563 /* Init rate shaping and fairness contexts */
2564 bnx2x_init_port_minmax(bp);
2565
2566 bnx2x_calc_vn_weight_sum(bp);
2567
2568 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2569 bnx2x_init_vn_minmax(bp, 2*vn + port);
2570
2571 if (bp->port.pmf) {
2572 int func;
2573
2574 /* Set the attention towards other drivers on the same port */
2575 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2576 if (vn == BP_E1HVN(bp))
2577 continue;
2578
2579 func = ((vn << 1) | port);
2580 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2581 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2582 }
2583
2584 /* Store it to internal memory */
2585 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2586 REG_WR(bp, BAR_XSTRORM_INTMEM +
2587 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2588 ((u32 *)(&bp->cmng))[i]);
2589 }
2590}
2591
2592static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2593{
2594 int func = BP_FUNC(bp);
2595
2596 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2597 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2598
2599 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2600
2601 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2602 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2603 bp->state = BNX2X_STATE_DISABLED;
2604
2605 bnx2x_e1h_disable(bp);
2606 } else {
2607 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2608 bp->state = BNX2X_STATE_OPEN;
2609
2610 bnx2x_e1h_enable(bp);
2611 }
2612 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2613 }
2614 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2615
2616 bnx2x_update_min_max(bp);
2617 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2618 }
2619
2620 /* Report results to MCP */
2621 if (dcc_event)
2622 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2623 else
2624 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2625}
2626
a2fbb9ea
ET
2627/* the slow path queue is odd since completions arrive on the fastpath ring */
2628static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2629 u32 data_hi, u32 data_lo, int common)
2630{
34f80b04 2631 int func = BP_FUNC(bp);
a2fbb9ea 2632
34f80b04
EG
2633 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2634 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2635 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2636 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2637 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2638
2639#ifdef BNX2X_STOP_ON_ERROR
2640 if (unlikely(bp->panic))
2641 return -EIO;
2642#endif
2643
34f80b04 2644 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2645
2646 if (!bp->spq_left) {
2647 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2648 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2649 bnx2x_panic();
2650 return -EBUSY;
2651 }
f1410647 2652
a2fbb9ea
ET
2653 /* CID needs the port number to be encoded in it */
2654 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2655 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2656 HW_CID(bp, cid)));
2657 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2658 if (common)
2659 bp->spq_prod_bd->hdr.type |=
2660 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2661
2662 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2663 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2664
2665 bp->spq_left--;
2666
2667 if (bp->spq_prod_bd == bp->spq_last_bd) {
2668 bp->spq_prod_bd = bp->spq;
2669 bp->spq_prod_idx = 0;
2670 DP(NETIF_MSG_TIMER, "end of spq\n");
2671
2672 } else {
2673 bp->spq_prod_bd++;
2674 bp->spq_prod_idx++;
2675 }
2676
37dbbf32
EG
2677 /* Make sure that BD data is updated before writing the producer */
2678 wmb();
2679
34f80b04 2680 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2681 bp->spq_prod_idx);
2682
37dbbf32
EG
2683 mmiowb();
2684
34f80b04 2685 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2686 return 0;
2687}
2688
2689/* acquire split MCP access lock register */
4a37fb66 2690static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2691{
a2fbb9ea 2692 u32 i, j, val;
34f80b04 2693 int rc = 0;
a2fbb9ea
ET
2694
2695 might_sleep();
2696 i = 100;
2697 for (j = 0; j < i*10; j++) {
2698 val = (1UL << 31);
2699 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2700 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2701 if (val & (1L << 31))
2702 break;
2703
2704 msleep(5);
2705 }
a2fbb9ea 2706 if (!(val & (1L << 31))) {
19680c48 2707 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2708 rc = -EBUSY;
2709 }
2710
2711 return rc;
2712}
2713
4a37fb66
YG
2714/* release split MCP access lock register */
2715static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2716{
2717 u32 val = 0;
2718
2719 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2720}
2721
2722static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2723{
2724 struct host_def_status_block *def_sb = bp->def_status_blk;
2725 u16 rc = 0;
2726
2727 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2728 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2729 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2730 rc |= 1;
2731 }
2732 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2733 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2734 rc |= 2;
2735 }
2736 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2737 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2738 rc |= 4;
2739 }
2740 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2741 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2742 rc |= 8;
2743 }
2744 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2745 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2746 rc |= 16;
2747 }
2748 return rc;
2749}
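/* Note (editorial): the returned bitmask encodes which def-sb indices
 * changed: bit 0 = attention bits, bits 1-4 = the c/u/x/t storm status
 * blocks, matching the rc |= 1/2/4/8/16 updates above. bnx2x_sp_task()
 * below keys off bit 0 for HW attentions.
 */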
2750
2751/*
2752 * slow path service functions
2753 */
2754
2755static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2756{
34f80b04 2757 int port = BP_PORT(bp);
5c862848
EG
2758 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2759 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2760 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2761 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2762 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2763 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2764 u32 aeu_mask;
87942b46 2765 u32 nig_mask = 0;
a2fbb9ea 2766
a2fbb9ea
ET
2767 if (bp->attn_state & asserted)
2768 BNX2X_ERR("IGU ERROR\n");
2769
3fcaf2e5
EG
2770 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2771 aeu_mask = REG_RD(bp, aeu_addr);
2772
a2fbb9ea 2773 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2774 aeu_mask, asserted);
2775 aeu_mask &= ~(asserted & 0xff);
2776 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2777
3fcaf2e5
EG
2778 REG_WR(bp, aeu_addr, aeu_mask);
2779 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2780
3fcaf2e5 2781 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2782 bp->attn_state |= asserted;
3fcaf2e5 2783 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2784
2785 if (asserted & ATTN_HARD_WIRED_MASK) {
2786 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2787
a5e9a7cf
EG
2788 bnx2x_acquire_phy_lock(bp);
2789
877e9aa4 2790 /* save nig interrupt mask */
87942b46 2791 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2792 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2793
c18487ee 2794 bnx2x_link_attn(bp);
a2fbb9ea
ET
2795
2796 /* handle unicore attn? */
2797 }
2798 if (asserted & ATTN_SW_TIMER_4_FUNC)
2799 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2800
2801 if (asserted & GPIO_2_FUNC)
2802 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2803
2804 if (asserted & GPIO_3_FUNC)
2805 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2806
2807 if (asserted & GPIO_4_FUNC)
2808 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2809
2810 if (port == 0) {
2811 if (asserted & ATTN_GENERAL_ATTN_1) {
2812 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2813 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2814 }
2815 if (asserted & ATTN_GENERAL_ATTN_2) {
2816 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2817 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2818 }
2819 if (asserted & ATTN_GENERAL_ATTN_3) {
2820 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2821 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2822 }
2823 } else {
2824 if (asserted & ATTN_GENERAL_ATTN_4) {
2825 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2826 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2827 }
2828 if (asserted & ATTN_GENERAL_ATTN_5) {
2829 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2830 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2831 }
2832 if (asserted & ATTN_GENERAL_ATTN_6) {
2833 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2834 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2835 }
2836 }
2837
2838 } /* if hardwired */
2839
5c862848
EG
2840 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2841 asserted, hc_addr);
2842 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2843
2844 /* now set back the mask */
a5e9a7cf 2845 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2846 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2847 bnx2x_release_phy_lock(bp);
2848 }
a2fbb9ea
ET
2849}
2850
fd4ef40d
EG
2851static inline void bnx2x_fan_failure(struct bnx2x *bp)
2852{
2853 int port = BP_PORT(bp);
2854
2855 /* mark the failure */
2856 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2857 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2858 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2859 bp->link_params.ext_phy_config);
2860
2861 /* log the failure */
2862 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2863 " the driver to shutdown the card to prevent permanent"
2864 " damage. Please contact Dell Support for assistance\n",
2865 bp->dev->name);
2866}
877e9aa4 2867static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2868{
34f80b04 2869 int port = BP_PORT(bp);
877e9aa4 2870 int reg_offset;
4d295db0 2871 u32 val, swap_val, swap_override;
877e9aa4 2872
34f80b04
EG
2873 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2874 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2875
34f80b04 2876 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2877
2878 val = REG_RD(bp, reg_offset);
2879 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2880 REG_WR(bp, reg_offset, val);
2881
2882 BNX2X_ERR("SPIO5 hw attention\n");
2883
fd4ef40d 2884 /* Fan failure attention */
35b19ba5
EG
2885 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2886 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2887 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2888 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2889 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2890 /* The PHY reset is controlled by GPIO 1 */
2891 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2892 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2893 break;
2894
4d295db0
EG
2895 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2896 /* The PHY reset is controlled by GPIO 1 */
2897 /* fake the port number to cancel the swap done in
2898 set_gpio() */
2899 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2900 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2901 port = (swap_val && swap_override) ^ 1;
2902 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2903 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2904 break;
2905
877e9aa4
ET
2906 default:
2907 break;
2908 }
fd4ef40d 2909 bnx2x_fan_failure(bp);
877e9aa4 2910 }
34f80b04 2911
589abe3a
EG
2912 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2913 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2914 bnx2x_acquire_phy_lock(bp);
2915 bnx2x_handle_module_detect_int(&bp->link_params);
2916 bnx2x_release_phy_lock(bp);
2917 }
2918
34f80b04
EG
2919 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2920
2921 val = REG_RD(bp, reg_offset);
2922 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2923 REG_WR(bp, reg_offset, val);
2924
2925 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2926 (attn & HW_INTERRUT_ASSERT_SET_0));
2927 bnx2x_panic();
2928 }
877e9aa4
ET
2929}
2930
2931static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2932{
2933 u32 val;
2934
0626b899 2935 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
2936
2937 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2938 BNX2X_ERR("DB hw attention 0x%x\n", val);
2939 /* DORQ discard attention */
2940 if (val & 0x2)
2941 BNX2X_ERR("FATAL error from DORQ\n");
2942 }
34f80b04
EG
2943
2944 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2945
2946 int port = BP_PORT(bp);
2947 int reg_offset;
2948
2949 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2950 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2951
2952 val = REG_RD(bp, reg_offset);
2953 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2954 REG_WR(bp, reg_offset, val);
2955
2956 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2957 (attn & HW_INTERRUT_ASSERT_SET_1));
2958 bnx2x_panic();
2959 }
877e9aa4
ET
2960}
2961
2962static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2963{
2964 u32 val;
2965
2966 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2967
2968 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2969 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2970 /* CFC error attention */
2971 if (val & 0x2)
2972 BNX2X_ERR("FATAL error from CFC\n");
2973 }
2974
2975 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2976
2977 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2978 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2979 /* RQ_USDMDP_FIFO_OVERFLOW */
2980 if (val & 0x18000)
2981 BNX2X_ERR("FATAL error from PXP\n");
2982 }
34f80b04
EG
2983
2984 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2985
2986 int port = BP_PORT(bp);
2987 int reg_offset;
2988
2989 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2990 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2991
2992 val = REG_RD(bp, reg_offset);
2993 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2994 REG_WR(bp, reg_offset, val);
2995
2996 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2997 (attn & HW_INTERRUT_ASSERT_SET_2));
2998 bnx2x_panic();
2999 }
877e9aa4
ET
3000}
3001
3002static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3003{
34f80b04
EG
3004 u32 val;
3005
877e9aa4
ET
3006 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3007
34f80b04
EG
3008 if (attn & BNX2X_PMF_LINK_ASSERT) {
3009 int func = BP_FUNC(bp);
3010
3011 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2691d51d
EG
3012 val = SHMEM_RD(bp, func_mb[func].drv_status);
3013 if (val & DRV_STATUS_DCC_EVENT_MASK)
3014 bnx2x_dcc_event(bp,
3015 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3016 bnx2x__link_status_update(bp);
2691d51d 3017 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3018 bnx2x_pmf_update(bp);
3019
3020 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3021
3022 BNX2X_ERR("MC assert!\n");
3023 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3024 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3025 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3026 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3027 bnx2x_panic();
3028
3029 } else if (attn & BNX2X_MCP_ASSERT) {
3030
3031 BNX2X_ERR("MCP assert!\n");
3032 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3033 bnx2x_fw_dump(bp);
877e9aa4
ET
3034
3035 } else
3036 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3037 }
3038
3039 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3040 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3041 if (attn & BNX2X_GRC_TIMEOUT) {
3042 val = CHIP_IS_E1H(bp) ?
3043 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3044 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3045 }
3046 if (attn & BNX2X_GRC_RSV) {
3047 val = CHIP_IS_E1H(bp) ?
3048 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3049 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3050 }
877e9aa4 3051 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3052 }
3053}
3054
3055static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3056{
a2fbb9ea
ET
3057 struct attn_route attn;
3058 struct attn_route group_mask;
34f80b04 3059 int port = BP_PORT(bp);
877e9aa4 3060 int index;
a2fbb9ea
ET
3061 u32 reg_addr;
3062 u32 val;
3fcaf2e5 3063 u32 aeu_mask;
a2fbb9ea
ET
3064
3065 /* need to take HW lock because MCP or other port might also
3066 try to handle this event */
4a37fb66 3067 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
3068
3069 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3070 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3071 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3072 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
3073 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3074 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
3075
3076 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3077 if (deasserted & (1 << index)) {
3078 group_mask = bp->attn_group[index];
3079
34f80b04
EG
3080 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3081 index, group_mask.sig[0], group_mask.sig[1],
3082 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3083
877e9aa4
ET
3084 bnx2x_attn_int_deasserted3(bp,
3085 attn.sig[3] & group_mask.sig[3]);
3086 bnx2x_attn_int_deasserted1(bp,
3087 attn.sig[1] & group_mask.sig[1]);
3088 bnx2x_attn_int_deasserted2(bp,
3089 attn.sig[2] & group_mask.sig[2]);
3090 bnx2x_attn_int_deasserted0(bp,
3091 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3092
a2fbb9ea
ET
3093 if ((attn.sig[0] & group_mask.sig[0] &
3094 HW_PRTY_ASSERT_SET_0) ||
3095 (attn.sig[1] & group_mask.sig[1] &
3096 HW_PRTY_ASSERT_SET_1) ||
3097 (attn.sig[2] & group_mask.sig[2] &
3098 HW_PRTY_ASSERT_SET_2))
6378c025 3099 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
3100 }
3101 }
3102
4a37fb66 3103 bnx2x_release_alr(bp);
a2fbb9ea 3104
5c862848 3105 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
3106
3107 val = ~deasserted;
3fcaf2e5
EG
3108 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3109 val, reg_addr);
5c862848 3110 REG_WR(bp, reg_addr, val);
a2fbb9ea 3111
a2fbb9ea 3112 if (~bp->attn_state & deasserted)
3fcaf2e5 3113 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3114
3115 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3116 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3117
3fcaf2e5
EG
3118 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3119 aeu_mask = REG_RD(bp, reg_addr);
3120
3121 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3122 aeu_mask, deasserted);
3123 aeu_mask |= (deasserted & 0xff);
3124 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3125
3fcaf2e5
EG
3126 REG_WR(bp, reg_addr, aeu_mask);
3127 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3128
3129 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3130 bp->attn_state &= ~deasserted;
3131 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3132}
3133
3134static void bnx2x_attn_int(struct bnx2x *bp)
3135{
3136 /* read local copy of bits */
68d59484
EG
3137 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3138 attn_bits);
3139 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3140 attn_bits_ack);
a2fbb9ea
ET
3141 u32 attn_state = bp->attn_state;
3142
3143 /* look for changed bits */
3144 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3145 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3146
3147 DP(NETIF_MSG_HW,
3148 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3149 attn_bits, attn_ack, asserted, deasserted);
3150
3151 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3152 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3153
3154 /* handle bits that were raised */
3155 if (asserted)
3156 bnx2x_attn_int_asserted(bp, asserted);
3157
3158 if (deasserted)
3159 bnx2x_attn_int_deasserted(bp, deasserted);
3160}
3161
3162static void bnx2x_sp_task(struct work_struct *work)
3163{
1cf167f2 3164 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3165 u16 status;
3166
34f80b04 3167
a2fbb9ea
ET
3168 /* Return here if interrupt is disabled */
3169 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3170 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3171 return;
3172 }
3173
3174 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3175/* if (status == 0) */
3176/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3177
3196a88a 3178 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3179
877e9aa4
ET
3180 /* HW attentions */
3181 if (status & 0x1)
a2fbb9ea 3182 bnx2x_attn_int(bp);
a2fbb9ea 3183
68d59484 3184 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
3185 IGU_INT_NOP, 1);
3186 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3187 IGU_INT_NOP, 1);
3188 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3189 IGU_INT_NOP, 1);
3190 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3191 IGU_INT_NOP, 1);
3192 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3193 IGU_INT_ENABLE, 1);
877e9aa4 3194
a2fbb9ea
ET
3195}
3196
3197static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3198{
3199 struct net_device *dev = dev_instance;
3200 struct bnx2x *bp = netdev_priv(dev);
3201
3202 /* Return here if interrupt is disabled */
3203 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3204 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3205 return IRQ_HANDLED;
3206 }
3207
8d9c5f34 3208 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3209
3210#ifdef BNX2X_STOP_ON_ERROR
3211 if (unlikely(bp->panic))
3212 return IRQ_HANDLED;
3213#endif
3214
1cf167f2 3215 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3216
3217 return IRQ_HANDLED;
3218}
3219
3220/* end of slow path */
3221
3222/* Statistics */
3223
3224/****************************************************************************
3225* Macros
3226****************************************************************************/
3227
a2fbb9ea
ET
3228/* sum[hi:lo] += add[hi:lo] */
3229#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3230 do { \
3231 s_lo += a_lo; \
f5ba6772 3232 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
3233 } while (0)
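/* Worked example (illustrative): ADD_64(hi, 0, lo, 1) with
 * lo == 0xffffffff wraps lo to 0; the (s_lo < a_lo) test then sees
 * 0 < 1 and carries 1 into hi.
 */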
3234
3235/* difference = minuend - subtrahend */
3236#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3237 do { \
bb2a0f7a
YG
3238 if (m_lo < s_lo) { \
3239 /* underflow */ \
a2fbb9ea 3240 d_hi = m_hi - s_hi; \
bb2a0f7a 3241 if (d_hi > 0) { \
6378c025 3242 /* we can 'borrow' 1 */ \
a2fbb9ea
ET
3243 d_hi--; \
3244 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3245 } else { \
6378c025 3246 /* m_hi <= s_hi */ \
a2fbb9ea
ET
3247 d_hi = 0; \
3248 d_lo = 0; \
3249 } \
bb2a0f7a
YG
3250 } else { \
3251 /* m_lo >= s_lo */ \
a2fbb9ea 3252 if (m_hi < s_hi) { \
bb2a0f7a
YG
3253 d_hi = 0; \
3254 d_lo = 0; \
3255 } else { \
6378c025 3256 /* m_hi >= s_hi */ \
bb2a0f7a
YG
3257 d_hi = m_hi - s_hi; \
3258 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
3259 } \
3260 } \
3261 } while (0)
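/* Worked example (illustrative): minuend 0x1:0x00000000 minus
 * subtrahend 0x0:0x00000001 takes the underflow branch:
 * d_hi = 1 - 0 - 1 = 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff.
 */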
3262
bb2a0f7a 3263#define UPDATE_STAT64(s, t) \
a2fbb9ea 3264 do { \
bb2a0f7a
YG
3265 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3266 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3267 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3268 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3269 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3270 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
3271 } while (0)
3272
bb2a0f7a 3273#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3274 do { \
bb2a0f7a
YG
3275 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3276 diff.lo, new->s##_lo, old->s##_lo); \
3277 ADD_64(estats->t##_hi, diff.hi, \
3278 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
3279 } while (0)
3280
3281/* sum[hi:lo] += add */
3282#define ADD_EXTEND_64(s_hi, s_lo, a) \
3283 do { \
3284 s_lo += a; \
3285 s_hi += (s_lo < a) ? 1 : 0; \
3286 } while (0)
3287
bb2a0f7a 3288#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3289 do { \
bb2a0f7a
YG
3290 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3291 pstats->mac_stx[1].s##_lo, \
3292 new->s); \
a2fbb9ea
ET
3293 } while (0)
3294
bb2a0f7a 3295#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3296 do { \
4781bfad
EG
3297 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3298 old_tclient->s = tclient->s; \
de832a55
EG
3299 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3300 } while (0)
3301
3302#define UPDATE_EXTEND_USTAT(s, t) \
3303 do { \
3304 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3305 old_uclient->s = uclient->s; \
3306 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3307 } while (0)
3308
3309#define UPDATE_EXTEND_XSTAT(s, t) \
3310 do { \
4781bfad
EG
3311 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3312 old_xclient->s = xclient->s; \
de832a55
EG
3313 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3314 } while (0)
3315
3316/* minuend -= subtrahend */
3317#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3318 do { \
3319 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3320 } while (0)
3321
3322/* minuend[hi:lo] -= subtrahend */
3323#define SUB_EXTEND_64(m_hi, m_lo, s) \
3324 do { \
3325 SUB_64(m_hi, 0, m_lo, s); \
3326 } while (0)
3327
3328#define SUB_EXTEND_USTAT(s, t) \
3329 do { \
3330 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3331 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3332 } while (0)
3333
3334/*
3335 * General service functions
3336 */
3337
3338static inline long bnx2x_hilo(u32 *hiref)
3339{
3340 u32 lo = *(hiref + 1);
3341#if (BITS_PER_LONG == 64)
3342 u32 hi = *hiref;
3343
3344 return HILO_U64(hi, lo);
3345#else
3346 return lo;
3347#endif
3348}
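/* Note (editorial): on 64-bit builds bnx2x_hilo() returns the full
 * HILO_U64(hi, lo) value; on 32-bit builds a long can only hold the
 * low dword, so the high half is dropped.
 */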
3349
3350/*
3351 * Init service functions
3352 */
3353
bb2a0f7a
YG
3354static void bnx2x_storm_stats_post(struct bnx2x *bp)
3355{
3356 if (!bp->stats_pending) {
3357 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3358 int i, rc;
bb2a0f7a
YG
3359
3360 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3361 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3362 for_each_queue(bp, i)
3363 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3364
3365 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3366 ((u32 *)&ramrod_data)[1],
3367 ((u32 *)&ramrod_data)[0], 0);
3368 if (rc == 0) {
3369 /* the stats ramrod has its own slot on the spq */
3370 bp->spq_left++;
3371 bp->stats_pending = 1;
3372 }
3373 }
3374}
3375
bb2a0f7a
YG
3376static void bnx2x_hw_stats_post(struct bnx2x *bp)
3377{
3378 struct dmae_command *dmae = &bp->stats_dmae;
3379 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3380
3381 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3382 if (CHIP_REV_IS_SLOW(bp))
3383 return;
bb2a0f7a
YG
3384
3385 /* loader */
3386 if (bp->executer_idx) {
3387 int loader_idx = PMF_DMAE_C(bp);
3388
3389 memset(dmae, 0, sizeof(struct dmae_command));
3390
3391 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3392 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3393 DMAE_CMD_DST_RESET |
3394#ifdef __BIG_ENDIAN
3395 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3396#else
3397 DMAE_CMD_ENDIANITY_DW_SWAP |
3398#endif
3399 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3400 DMAE_CMD_PORT_0) |
3401 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3402 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3403 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3404 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3405 sizeof(struct dmae_command) *
3406 (loader_idx + 1)) >> 2;
3407 dmae->dst_addr_hi = 0;
3408 dmae->len = sizeof(struct dmae_command) >> 2;
3409 if (CHIP_IS_E1(bp))
3410 dmae->len--;
3411 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3412 dmae->comp_addr_hi = 0;
3413 dmae->comp_val = 1;
3414
3415 *stats_comp = 0;
3416 bnx2x_post_dmae(bp, dmae, loader_idx);
3417
3418 } else if (bp->func_stx) {
3419 *stats_comp = 0;
3420 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3421 }
3422}
3423
3424static int bnx2x_stats_comp(struct bnx2x *bp)
3425{
3426 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3427 int cnt = 10;
3428
3429 might_sleep();
3430 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3431 if (!cnt) {
3432 BNX2X_ERR("timeout waiting for stats finished\n");
3433 break;
3434 }
3435 cnt--;
12469401 3436 msleep(1);
bb2a0f7a
YG
3437 }
3438 return 1;
3439}
3440
3441/*
3442 * Statistics service functions
3443 */
3444
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

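/*
 * Build the PMF statistics DMAE chain: write the host port (and, when
 * available, function) statistics out for the management firmware,
 * then read the hardware counters back - from the BigMAC or EMAC,
 * depending on which MAC the link is using, and from the NIG.  Every
 * command except the last completes to the loader GO register so that
 * bnx2x_hw_stats_post() can run them back to back; the last one
 * completes to stats_comp.
 */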
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

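/*
 * Fold the freshly DMAE'd BigMAC counters into the port statistics.
 * The UPDATE_STAT64() helpers extend each hardware hi/lo counter pair
 * from 'new' into the 64-bit mac_stx[1] fields of pstats; the local
 * 'diff' struct is referenced implicitly by those macros for the
 * wrap-around arithmetic.
 */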
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

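/*
 * Merge the per-client statistics collected by the TSTORM, USTORM and
 * XSTORM firmware processors (fetched by the statistics ramrod) into
 * the per-queue, per-function and global counters.  Each storm tags
 * its block with a stats_counter; if any client's counter does not
 * match the ramrod sequence number the whole update is rejected, so a
 * fresh snapshot is never mixed with a stale one.
 */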
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

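/*
 * Map the driver's internal 64-bit counters onto the kernel's
 * net_device_stats.  bnx2x_hilo() collapses a hi/lo counter pair into
 * a single value, and the rx/tx error totals are composed from the
 * individual MAC and buffer-discard counters gathered above.
 */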
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

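/*
 * Statistics state machine: bp->stats_state is either DISABLED or
 * ENABLED and each event (PMF change, link up, update, stop) selects
 * an action plus the next state from the table below.  Callers only
 * post events, e.g. the periodic timer does
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 *
 * which runs bnx2x_stats_update() while ENABLED and is a no-op while
 * DISABLED.
 */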
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

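/*
 * Periodic driver timer.  Besides optional polled rx/tx work, it
 * drives the driver/MCP heartbeat: the driver advances its pulse
 * sequence number in shared memory and checks that the management
 * firmware's response trails it by at most one.  It also feeds
 * STATS_EVENT_UPDATE into the statistics state machine while the
 * device is up.
 */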
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

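/*
 * Initialize a per-queue status block: program the CSTORM with the
 * host DMA addresses of the USTORM and CSTORM sections, tag them with
 * the owning function, and disable host coalescing on every index
 * until bnx2x_update_coalesce() configures it.  Finally ACK the block
 * to enable interrupts from it.
 */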
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

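/*
 * Program the host-coalescing timeouts of every queue's status block.
 * bp->rx_ticks/bp->tx_ticks are in microseconds; the division by 12
 * apparently matches the HC timer resolution of 12 us per tick, and a
 * resulting value of 0 disables coalescing on that index altogether.
 */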
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

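/*
 * Set up the rx rings of every rx queue.  With TPA (LRO) enabled, an
 * skb pool of max_agg_queues entries is pre-allocated per queue for
 * aggregation; an allocation failure disables TPA on that queue only.
 * Each queue then gets the "next page" pointers of its SGE, BD and
 * CQE rings linked, its buffers allocated, and the resulting producer
 * indices published to the hardware.
 */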
a2fbb9ea
ET
4887static void bnx2x_init_rx_rings(struct bnx2x *bp)
4888{
7a9b2557 4889 int func = BP_FUNC(bp);
32626230
EG
4890 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4891 ETH_MAX_AGGREGATION_QUEUES_E1H;
4892 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4893 int i, j;
a2fbb9ea 4894
87942b46 4895 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4896 DP(NETIF_MSG_IFUP,
4897 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4898
7a9b2557 4899 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4900
555f6c78 4901 for_each_rx_queue(bp, j) {
32626230 4902 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4903
32626230 4904 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4905 fp->tpa_pool[i].skb =
4906 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4907 if (!fp->tpa_pool[i].skb) {
4908 BNX2X_ERR("Failed to allocate TPA "
4909 "skb pool for queue[%d] - "
4910 "disabling TPA on this "
4911 "queue!\n", j);
4912 bnx2x_free_tpa_pool(bp, fp, i);
4913 fp->disable_tpa = 1;
4914 break;
4915 }
4916 pci_unmap_addr_set((struct sw_rx_bd *)
4917 &bp->fp->tpa_pool[i],
4918 mapping, 0);
4919 fp->tpa_state[i] = BNX2X_TPA_STOP;
4920 }
4921 }
4922 }
4923
555f6c78 4924 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4925 struct bnx2x_fastpath *fp = &bp->fp[j];
4926
4927 fp->rx_bd_cons = 0;
4928 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4929 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4930
ca00392c
EG
4931 /* Mark queue as Rx */
4932 fp->is_rx_queue = 1;
4933
7a9b2557
VZ
4934 /* "next page" elements initialization */
4935 /* SGE ring */
4936 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4937 struct eth_rx_sge *sge;
4938
4939 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4940 sge->addr_hi =
4941 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4942 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4943 sge->addr_lo =
4944 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4945 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4946 }
4947
4948 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4949
7a9b2557 4950 /* RX BD ring */
a2fbb9ea
ET
4951 for (i = 1; i <= NUM_RX_RINGS; i++) {
4952 struct eth_rx_bd *rx_bd;
4953
4954 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4955 rx_bd->addr_hi =
4956 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4957 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4958 rx_bd->addr_lo =
4959 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4960 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4961 }
4962
34f80b04 4963 /* CQ ring */
a2fbb9ea
ET
4964 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4965 struct eth_rx_cqe_next_page *nextpg;
4966
4967 nextpg = (struct eth_rx_cqe_next_page *)
4968 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4969 nextpg->addr_hi =
4970 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4971 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4972 nextpg->addr_lo =
4973 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4974 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4975 }
4976
7a9b2557
VZ
4977 /* Allocate SGEs and initialize the ring elements */
4978 for (i = 0, ring_prod = 0;
4979 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4980
7a9b2557
VZ
4981 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4982 BNX2X_ERR("was only able to allocate "
4983 "%d rx sges\n", i);
4984 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4985 /* Cleanup already allocated elements */
4986 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4987 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4988 fp->disable_tpa = 1;
4989 ring_prod = 0;
4990 break;
4991 }
4992 ring_prod = NEXT_SGE_IDX(ring_prod);
4993 }
4994 fp->rx_sge_prod = ring_prod;
4995
4996 /* Allocate BDs and initialize BD ring */
66e855f3 4997 fp->rx_comp_cons = 0;
7a9b2557 4998 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4999 for (i = 0; i < bp->rx_ring_size; i++) {
5000 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5001 BNX2X_ERR("was only able to allocate "
de832a55
EG
5002 "%d rx skbs on queue[%d]\n", i, j);
5003 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5004 break;
5005 }
5006 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5007 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5008 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5009 }
5010
7a9b2557
VZ
5011 fp->rx_bd_prod = ring_prod;
5012 /* must not have more available CQEs than BDs */
5013 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5014 cqe_ring_prod);
a2fbb9ea
ET
5015 fp->rx_pkt = fp->rx_calls = 0;
5016
7a9b2557
VZ
5017 /* Warning!
5018 * this will generate an interrupt (to the TSTORM);
5019 * it must only be done after the chip is initialized
5020 */
5021 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5022 fp->rx_sge_prod);
a2fbb9ea
ET
5023 if (j != 0)
5024 continue;
5025
5026 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5027 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5028 U64_LO(fp->rx_comp_mapping));
5029 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5030 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5031 U64_HI(fp->rx_comp_mapping));
5032 }
5033}
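
/*
 * Editor's note: a minimal standalone sketch (not driver code) of the ring
 * index arithmetic the loops above rely on.  Each BD page reserves its last
 * two slots as a "next page" pointer, so a NEXT_RX_IDX()-style advance must
 * skip those slots at every page boundary.  The page size below is an
 * illustrative assumption.
 */
#define SKETCH_DESC_PER_PAGE	512			/* assumed BDs per page */
#define SKETCH_USABLE_PER_PAGE	(SKETCH_DESC_PER_PAGE - 2)

static inline u16 sketch_next_rx_idx(u16 idx)
{
	/* on the last usable slot of a page, jump over the two reserved BDs */
	if ((idx % SKETCH_DESC_PER_PAGE) == (SKETCH_USABLE_PER_PAGE - 1))
		return idx + 3;
	return idx + 1;
}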
5034
5035static void bnx2x_init_tx_ring(struct bnx2x *bp)
5036{
5037 int i, j;
5038
555f6c78 5039 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
5040 struct bnx2x_fastpath *fp = &bp->fp[j];
5041
5042 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5043 struct eth_tx_next_bd *tx_next_bd =
5044 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5045
ca00392c 5046 tx_next_bd->addr_hi =
a2fbb9ea 5047 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5048 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5049 tx_next_bd->addr_lo =
a2fbb9ea 5050 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5051 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5052 }
5053
ca00392c
EG
5054 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5055 fp->tx_db.data.zero_fill1 = 0;
5056 fp->tx_db.data.prod = 0;
5057
a2fbb9ea
ET
5058 fp->tx_pkt_prod = 0;
5059 fp->tx_pkt_cons = 0;
5060 fp->tx_bd_prod = 0;
5061 fp->tx_bd_cons = 0;
5062 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5063 fp->tx_pkt = 0;
5064 }
6fe49bb9
EG
5065
5066 /* clean tx statistics */
5067 for_each_rx_queue(bp, i)
5068 bnx2x_fp(bp, i, tx_pkt) = 0;
a2fbb9ea
ET
5069}
5070
5071static void bnx2x_init_sp_ring(struct bnx2x *bp)
5072{
34f80b04 5073 int func = BP_FUNC(bp);
a2fbb9ea
ET
5074
5075 spin_lock_init(&bp->spq_lock);
5076
5077 bp->spq_left = MAX_SPQ_PENDING;
5078 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5079 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5080 bp->spq_prod_bd = bp->spq;
5081 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5082
34f80b04 5083 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5084 U64_LO(bp->spq_mapping));
34f80b04
EG
5085 REG_WR(bp,
5086 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5087 U64_HI(bp->spq_mapping));
5088
34f80b04 5089 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5090 bp->spq_prod_idx);
5091}
5092
5093static void bnx2x_init_context(struct bnx2x *bp)
5094{
5095 int i;
5096
ca00392c 5097 for_each_rx_queue(bp, i) {
a2fbb9ea
ET
5098 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5099 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5100 u8 cl_id = fp->cl_id;
a2fbb9ea 5101
34f80b04
EG
5102 context->ustorm_st_context.common.sb_index_numbers =
5103 BNX2X_RX_SB_INDEX_NUM;
0626b899 5104 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5105 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5106 context->ustorm_st_context.common.flags =
de832a55
EG
5107 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5108 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5109 context->ustorm_st_context.common.statistics_counter_id =
5110 cl_id;
8d9c5f34 5111 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5112 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5113 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5114 bp->rx_buf_size;
34f80b04 5115 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5116 U64_HI(fp->rx_desc_mapping);
34f80b04 5117 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5118 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5119 if (!fp->disable_tpa) {
5120 context->ustorm_st_context.common.flags |=
ca00392c 5121 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5122 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5123 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5124 (u32)0xffff);
7a9b2557
VZ
5125 context->ustorm_st_context.common.sge_page_base_hi =
5126 U64_HI(fp->rx_sge_mapping);
5127 context->ustorm_st_context.common.sge_page_base_lo =
5128 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5129
5130 context->ustorm_st_context.common.max_sges_for_packet =
5131 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5132 context->ustorm_st_context.common.max_sges_for_packet =
5133 ((context->ustorm_st_context.common.
5134 max_sges_for_packet + PAGES_PER_SGE - 1) &
5135 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5136 }
5137
8d9c5f34
EG
5138 context->ustorm_ag_context.cdu_usage =
5139 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5140 CDU_REGION_NUMBER_UCM_AG,
5141 ETH_CONNECTION_TYPE);
5142
ca00392c
EG
5143 context->xstorm_ag_context.cdu_reserved =
5144 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5145 CDU_REGION_NUMBER_XCM_AG,
5146 ETH_CONNECTION_TYPE);
5147 }
5148
5149 for_each_tx_queue(bp, i) {
5150 struct bnx2x_fastpath *fp = &bp->fp[i];
5151 struct eth_context *context =
5152 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5153
5154 context->cstorm_st_context.sb_index_number =
5155 C_SB_ETH_TX_CQ_INDEX;
5156 context->cstorm_st_context.status_block_id = fp->sb_id;
5157
8d9c5f34
EG
5158 context->xstorm_st_context.tx_bd_page_base_hi =
5159 U64_HI(fp->tx_desc_mapping);
5160 context->xstorm_st_context.tx_bd_page_base_lo =
5161 U64_LO(fp->tx_desc_mapping);
ca00392c 5162 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5163 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5164 }
5165}
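
/*
 * Editor's note: worked example (assumed values) for the
 * max_sges_for_packet computation above.  With 4K SGE pages and
 * PAGES_PER_SGE = 2 (so PAGES_PER_SGE_SHIFT = 1), an mtu of 9000 gives:
 *   pages = SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT = 12288 >> 12 = 3
 *   sges  = round_up(3, PAGES_PER_SGE) >> PAGES_PER_SGE_SHIFT = 4 >> 1 = 2
 * i.e. three pages of aggregated data fit in two 2-page SGE entries.
 */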
5166
5167static void bnx2x_init_ind_table(struct bnx2x *bp)
5168{
26c8fa4d 5169 int func = BP_FUNC(bp);
a2fbb9ea
ET
5170 int i;
5171
555f6c78 5172 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5173 return;
5174
555f6c78
EG
5175 DP(NETIF_MSG_IFUP,
5176 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5177 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5178 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5179 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 5180 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
5181}
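
/*
 * Editor's note: standalone sketch of the round-robin fill performed by
 * bnx2x_init_ind_table() above -- RSS hash slot i is steered to Rx client
 * (base + i % num_rx_queues).  Toy helper, not in the driver.
 */
static void sketch_fill_ind_table(u8 *table, int size,
				  u8 base_cl_id, int num_rx_queues)
{
	int i;

	for (i = 0; i < size; i++)
		table[i] = base_cl_id + (i % num_rx_queues);
}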
5182
49d66772
ET
5183static void bnx2x_set_client_config(struct bnx2x *bp)
5184{
49d66772 5185 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5186 int port = BP_PORT(bp);
5187 int i;
49d66772 5188
e7799c5f 5189 tstorm_client.mtu = bp->dev->mtu;
49d66772 5190 tstorm_client.config_flags =
de832a55
EG
5191 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5192 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5193#ifdef BCM_VLAN
0c6671b0 5194 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5195 tstorm_client.config_flags |=
8d9c5f34 5196 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5197 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5198 }
5199#endif
49d66772
ET
5200
5201 for_each_queue(bp, i) {
de832a55
EG
5202 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5203
49d66772 5204 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5205 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5206 ((u32 *)&tstorm_client)[0]);
5207 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5208 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5209 ((u32 *)&tstorm_client)[1]);
5210 }
5211
34f80b04
EG
5212 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5213 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5214}
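
/*
 * Editor's note: bnx2x_set_client_config() above (and several functions
 * below) copy a host-side struct into storm internal memory one 32-bit
 * word at a time.  A hypothetical helper capturing that idiom:
 */
static void sketch_wr_struct_to_storm(struct bnx2x *bp, u32 addr,
				      const void *data, int size)
{
	const u32 *words = data;
	int i;

	for (i = 0; i < size / 4; i++)	/* size assumed 4-byte aligned */
		REG_WR(bp, addr + i * 4, words[i]);
}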
5215
a2fbb9ea
ET
5216static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5217{
a2fbb9ea 5218 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
5219 int mode = bp->rx_mode;
5220 int mask = (1 << BP_L_ID(bp));
5221 int func = BP_FUNC(bp);
581ce43d 5222 int port = BP_PORT(bp);
a2fbb9ea 5223 int i;
581ce43d
EG
5224 /* All but management unicast packets should pass to the host as well */
5225 u32 llh_mask =
5226 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5227 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5228 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5229 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5230
3196a88a 5231 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5232
5233 switch (mode) {
5234 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5235 tstorm_mac_filter.ucast_drop_all = mask;
5236 tstorm_mac_filter.mcast_drop_all = mask;
5237 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5238 break;
356e2385 5239
a2fbb9ea 5240 case BNX2X_RX_MODE_NORMAL:
34f80b04 5241 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5242 break;
356e2385 5243
a2fbb9ea 5244 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5245 tstorm_mac_filter.mcast_accept_all = mask;
5246 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5247 break;
356e2385 5248
a2fbb9ea 5249 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5250 tstorm_mac_filter.ucast_accept_all = mask;
5251 tstorm_mac_filter.mcast_accept_all = mask;
5252 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5253 /* pass management unicast packets as well */
5254 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5255 break;
356e2385 5256
a2fbb9ea 5257 default:
34f80b04
EG
5258 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5259 break;
a2fbb9ea
ET
5260 }
5261
581ce43d
EG
5262 REG_WR(bp,
5263 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5264 llh_mask);
5265
a2fbb9ea
ET
5266 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5267 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5268 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5269 ((u32 *)&tstorm_mac_filter)[i]);
5270
34f80b04 5271/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5272 ((u32 *)&tstorm_mac_filter)[i]); */
5273 }
a2fbb9ea 5274
49d66772
ET
5275 if (mode != BNX2X_RX_MODE_NONE)
5276 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5277}
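
/*
 * Editor's note: the mode-to-filter mapping above forms a strict subset
 * chain (promisc > allmulti > normal > none).  A toy restatement using
 * deliberate fall-through makes that containment explicit; the helper is
 * illustrative, not driver code.
 */
static void sketch_rx_mode_masks(int mode, u32 mask, u32 *ucast_acc,
				 u32 *mcast_acc, u32 *bcast_acc)
{
	*ucast_acc = *mcast_acc = *bcast_acc = 0;

	switch (mode) {
	case BNX2X_RX_MODE_PROMISC:
		*ucast_acc = mask;	/* fall through: promisc adds unicast */
	case BNX2X_RX_MODE_ALLMULTI:
		*mcast_acc = mask;	/* fall through: allmulti adds multicast */
	case BNX2X_RX_MODE_NORMAL:
		*bcast_acc = mask;	/* every active mode accepts broadcast */
	default:
		break;
	}
}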
5278
471de716
EG
5279static void bnx2x_init_internal_common(struct bnx2x *bp)
5280{
5281 int i;
5282
5283 /* Zero this manually as its initialization is
5284 currently missing in the initTool */
5285 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5286 REG_WR(bp, BAR_USTRORM_INTMEM +
5287 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5288}
5289
5290static void bnx2x_init_internal_port(struct bnx2x *bp)
5291{
5292 int port = BP_PORT(bp);
5293
ca00392c
EG
5294 REG_WR(bp,
5295 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5296 REG_WR(bp,
5297 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5298 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5299 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5300}
5301
5302static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5303{
a2fbb9ea
ET
5304 struct tstorm_eth_function_common_config tstorm_config = {0};
5305 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5306 int port = BP_PORT(bp);
5307 int func = BP_FUNC(bp);
de832a55
EG
5308 int i, j;
5309 u32 offset;
471de716 5310 u16 max_agg_size;
a2fbb9ea
ET
5311
5312 if (is_multi(bp)) {
555f6c78 5313 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5314 tstorm_config.rss_result_mask = MULTI_MASK;
5315 }
ca00392c
EG
5316
5317 /* Enable TPA if needed */
5318 if (bp->flags & TPA_ENABLE_FLAG)
5319 tstorm_config.config_flags |=
5320 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5321
8d9c5f34
EG
5322 if (IS_E1HMF(bp))
5323 tstorm_config.config_flags |=
5324 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5325
34f80b04
EG
5326 tstorm_config.leading_client_id = BP_L_ID(bp);
5327
a2fbb9ea 5328 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5329 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5330 (*(u32 *)&tstorm_config));
5331
c14423fe 5332 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
5333 bnx2x_set_storm_rx_mode(bp);
5334
de832a55
EG
5335 for_each_queue(bp, i) {
5336 u8 cl_id = bp->fp[i].cl_id;
5337
5338 /* reset xstorm per client statistics */
5339 offset = BAR_XSTRORM_INTMEM +
5340 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5341 for (j = 0;
5342 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5343 REG_WR(bp, offset + j*4, 0);
5344
5345 /* reset tstorm per client statistics */
5346 offset = BAR_TSTRORM_INTMEM +
5347 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5348 for (j = 0;
5349 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5350 REG_WR(bp, offset + j*4, 0);
5351
5352 /* reset ustorm per client statistics */
5353 offset = BAR_USTRORM_INTMEM +
5354 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5355 for (j = 0;
5356 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5357 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5358 }
5359
5360 /* Init statistics related context */
34f80b04 5361 stats_flags.collect_eth = 1;
a2fbb9ea 5362
66e855f3 5363 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5364 ((u32 *)&stats_flags)[0]);
66e855f3 5365 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5366 ((u32 *)&stats_flags)[1]);
5367
66e855f3 5368 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5369 ((u32 *)&stats_flags)[0]);
66e855f3 5370 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5371 ((u32 *)&stats_flags)[1]);
5372
de832a55
EG
5373 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5374 ((u32 *)&stats_flags)[0]);
5375 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5376 ((u32 *)&stats_flags)[1]);
5377
66e855f3 5378 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5379 ((u32 *)&stats_flags)[0]);
66e855f3 5380 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5381 ((u32 *)&stats_flags)[1]);
5382
66e855f3
YG
5383 REG_WR(bp, BAR_XSTRORM_INTMEM +
5384 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5385 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5386 REG_WR(bp, BAR_XSTRORM_INTMEM +
5387 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5388 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5389
5390 REG_WR(bp, BAR_TSTRORM_INTMEM +
5391 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5392 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5393 REG_WR(bp, BAR_TSTRORM_INTMEM +
5394 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5395 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5396
de832a55
EG
5397 REG_WR(bp, BAR_USTRORM_INTMEM +
5398 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5399 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5400 REG_WR(bp, BAR_USTRORM_INTMEM +
5401 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5402 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5403
34f80b04
EG
5404 if (CHIP_IS_E1H(bp)) {
5405 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5406 IS_E1HMF(bp));
5407 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5408 IS_E1HMF(bp));
5409 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5410 IS_E1HMF(bp));
5411 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5412 IS_E1HMF(bp));
5413
7a9b2557
VZ
5414 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5415 bp->e1hov);
34f80b04
EG
5416 }
5417
4f40f2cb
EG
5418 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5419 max_agg_size =
5420 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5421 SGE_PAGE_SIZE * PAGES_PER_SGE),
5422 (u32)0xffff);
555f6c78 5423 for_each_rx_queue(bp, i) {
7a9b2557 5424 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5425
5426 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5427 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5428 U64_LO(fp->rx_comp_mapping));
5429 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5430 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5431 U64_HI(fp->rx_comp_mapping));
5432
ca00392c
EG
5433 /* Next page */
5434 REG_WR(bp, BAR_USTRORM_INTMEM +
5435 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5436 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5437 REG_WR(bp, BAR_USTRORM_INTMEM +
5438 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5439 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5440
7a9b2557 5441 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5442 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5443 max_agg_size);
5444 }
8a1c38d1 5445
1c06328c
EG
5446 /* dropless flow control */
5447 if (CHIP_IS_E1H(bp)) {
5448 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5449
5450 rx_pause.bd_thr_low = 250;
5451 rx_pause.cqe_thr_low = 250;
5452 rx_pause.cos = 1;
5453 rx_pause.sge_thr_low = 0;
5454 rx_pause.bd_thr_high = 350;
5455 rx_pause.cqe_thr_high = 350;
5456 rx_pause.sge_thr_high = 0;
5457
5458 for_each_rx_queue(bp, i) {
5459 struct bnx2x_fastpath *fp = &bp->fp[i];
5460
5461 if (!fp->disable_tpa) {
5462 rx_pause.sge_thr_low = 150;
5463 rx_pause.sge_thr_high = 250;
5464 }
5465
5466
5467 offset = BAR_USTRORM_INTMEM +
5468 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5469 fp->cl_id);
5470 for (j = 0;
5471 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5472 j++)
5473 REG_WR(bp, offset + j*4,
5474 ((u32 *)&rx_pause)[j]);
5475 }
5476 }
5477
8a1c38d1
EG
5478 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5479
5480 /* Init rate shaping and fairness contexts */
5481 if (IS_E1HMF(bp)) {
5482 int vn;
5483
5484 /* During init there is no active link;
5485 until link is up, set link rate to 10Gbps */
5486 bp->link_vars.line_speed = SPEED_10000;
5487 bnx2x_init_port_minmax(bp);
5488
5489 bnx2x_calc_vn_weight_sum(bp);
5490
5491 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5492 bnx2x_init_vn_minmax(bp, 2*vn + port);
5493
5494 /* Enable rate shaping and fairness */
5495 bp->cmng.flags.cmng_enables =
5496 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5497 if (bp->vn_weight_sum)
5498 bp->cmng.flags.cmng_enables |=
5499 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5500 else
5501 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5502 " fairness will be disabled\n");
5503 } else {
5504 /* rate shaping and fairness are disabled */
5505 DP(NETIF_MSG_IFUP,
5506 "single function mode minmax will be disabled\n");
5507 }
5508
5509
5510 /* Store it to internal memory */
5511 if (bp->port.pmf)
5512 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5513 REG_WR(bp, BAR_XSTRORM_INTMEM +
5514 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5515 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5516}
5517
471de716
EG
5518static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5519{
5520 switch (load_code) {
5521 case FW_MSG_CODE_DRV_LOAD_COMMON:
5522 bnx2x_init_internal_common(bp);
5523 /* no break */
5524
5525 case FW_MSG_CODE_DRV_LOAD_PORT:
5526 bnx2x_init_internal_port(bp);
5527 /* no break */
5528
5529 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5530 bnx2x_init_internal_func(bp);
5531 break;
5532
5533 default:
5534 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5535 break;
5536 }
5537}
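
/*
 * Editor's note: the switch in bnx2x_init_internal() uses deliberate
 * fall-through so the init levels nest: COMMON implies PORT implies
 * FUNCTION.  An equivalent spelled out with explicit calls (error case
 * omitted), for readability only:
 */
static void sketch_init_internal(struct bnx2x *bp, u32 load_code)
{
	if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON)
		bnx2x_init_internal_common(bp);

	if (load_code != FW_MSG_CODE_DRV_LOAD_FUNCTION)
		bnx2x_init_internal_port(bp);	/* COMMON or PORT */

	bnx2x_init_internal_func(bp);		/* always */
}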
5538
5539static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5540{
5541 int i;
5542
5543 for_each_queue(bp, i) {
5544 struct bnx2x_fastpath *fp = &bp->fp[i];
5545
34f80b04 5546 fp->bp = bp;
a2fbb9ea 5547 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5548 fp->index = i;
34f80b04
EG
5549 fp->cl_id = BP_L_ID(bp) + i;
5550 fp->sb_id = fp->cl_id;
ca00392c
EG
5551 /* Suitable Rx and Tx SBs are served by the same client */
5552 if (i >= bp->num_rx_queues)
5553 fp->cl_id -= bp->num_rx_queues;
34f80b04 5554 DP(NETIF_MSG_IFUP,
f5372251
EG
5555 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5556 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5557 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5558 fp->sb_id);
5c862848 5559 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5560 }
5561
16119785
EG
5562 /* ensure status block indices were read */
5563 rmb();
5564
5565
5c862848
EG
5566 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5567 DEF_SB_ID);
5568 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5569 bnx2x_update_coalesce(bp);
5570 bnx2x_init_rx_rings(bp);
5571 bnx2x_init_tx_ring(bp);
5572 bnx2x_init_sp_ring(bp);
5573 bnx2x_init_context(bp);
471de716 5574 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5575 bnx2x_init_ind_table(bp);
0ef00459
EG
5576 bnx2x_stats_init(bp);
5577
5578 /* At this point, we are ready for interrupts */
5579 atomic_set(&bp->intr_sem, 0);
5580
5581 /* flush all before enabling interrupts */
5582 mb();
5583 mmiowb();
5584
615f8fd9 5585 bnx2x_int_enable(bp);
eb8da205
EG
5586
5587 /* Check for SPIO5 */
5588 bnx2x_attn_int_deasserted0(bp,
5589 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5590 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5591}
5592
5593/* end of nic init */
5594
5595/*
5596 * gzip service functions
5597 */
5598
5599static int bnx2x_gunzip_init(struct bnx2x *bp)
5600{
5601 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5602 &bp->gunzip_mapping);
5603 if (bp->gunzip_buf == NULL)
5604 goto gunzip_nomem1;
5605
5606 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5607 if (bp->strm == NULL)
5608 goto gunzip_nomem2;
5609
5610 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5611 GFP_KERNEL);
5612 if (bp->strm->workspace == NULL)
5613 goto gunzip_nomem3;
5614
5615 return 0;
5616
5617gunzip_nomem3:
5618 kfree(bp->strm);
5619 bp->strm = NULL;
5620
5621gunzip_nomem2:
5622 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5623 bp->gunzip_mapping);
5624 bp->gunzip_buf = NULL;
5625
5626gunzip_nomem1:
5627 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5628 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5629 return -ENOMEM;
5630}
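
/*
 * Editor's note: the gunzip_nomem1/2/3 labels above are the standard
 * kernel "layered unwind" idiom -- each allocation that fails jumps to the
 * label that frees only what was already allocated.  Minimal skeleton of
 * the same shape (hypothetical resources):
 */
static int sketch_layered_alloc(void **a, void **b)
{
	*a = vmalloc(4096);
	if (*a == NULL)
		goto nomem1;

	*b = vmalloc(4096);
	if (*b == NULL)
		goto nomem2;

	return 0;

nomem2:
	vfree(*a);
	*a = NULL;
nomem1:
	return -ENOMEM;
}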
5631
5632static void bnx2x_gunzip_end(struct bnx2x *bp)
5633{
5634 kfree(bp->strm->workspace);
5635
5636 kfree(bp->strm);
5637 bp->strm = NULL;
5638
5639 if (bp->gunzip_buf) {
5640 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5641 bp->gunzip_mapping);
5642 bp->gunzip_buf = NULL;
5643 }
5644}
5645
94a78b79 5646static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5647{
5648 int n, rc;
5649
5650 /* check gzip header */
94a78b79
VZ
5651 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5652 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5653 return -EINVAL;
94a78b79 5654 }
a2fbb9ea
ET
5655
5656 n = 10;
5657
34f80b04 5658#define FNAME 0x8
a2fbb9ea
ET
5659
5660 if (zbuf[3] & FNAME)
5661 while ((zbuf[n++] != 0) && (n < len));
5662
94a78b79 5663 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5664 bp->strm->avail_in = len - n;
5665 bp->strm->next_out = bp->gunzip_buf;
5666 bp->strm->avail_out = FW_BUF_SIZE;
5667
5668 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5669 if (rc != Z_OK)
5670 return rc;
5671
5672 rc = zlib_inflate(bp->strm, Z_FINISH);
5673 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5674 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5675 bp->dev->name, bp->strm->msg);
5676
5677 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5678 if (bp->gunzip_outlen & 0x3)
5679 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5680 " gunzip_outlen (%d) not aligned\n",
5681 bp->dev->name, bp->gunzip_outlen);
5682 bp->gunzip_outlen >>= 2;
5683
5684 zlib_inflateEnd(bp->strm);
5685
5686 if (rc == Z_STREAM_END)
5687 return 0;
5688
5689 return rc;
5690}
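
/*
 * Editor's note: userspace analogue of bnx2x_gunzip() above, using the
 * regular zlib API rather than the kernel's zlib_* wrappers.  It skips the
 * 10-byte gzip header (plus the NUL-terminated name when FNAME is set) and
 * then inflates the raw deflate stream -- the negative windowBits passed
 * to inflateInit2() is the same -MAX_WBITS trick the driver uses.
 * Illustrative sketch only; most error handling trimmed.
 */
#if 0	/* userspace example, kept out of the kernel build */
#include <zlib.h>
#include <string.h>

static int sketch_gunzip(const unsigned char *zbuf, int len,
			 unsigned char *out, int out_len)
{
	z_stream strm;
	int n = 10, rc;

	if (len < 10 || zbuf[0] != 0x1f || zbuf[1] != 0x8b)
		return -1;		/* not a gzip stream */
	if (zbuf[3] & 0x8)		/* FNAME: skip the original file name */
		while ((n < len) && (zbuf[n++] != 0))
			;

	memset(&strm, 0, sizeof(strm));
	strm.next_in = (unsigned char *)zbuf + n;
	strm.avail_in = len - n;
	strm.next_out = out;
	strm.avail_out = out_len;

	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)	/* raw deflate */
		return -1;
	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);

	return (rc == Z_STREAM_END) ? (int)(out_len - strm.avail_out) : -1;
}
#endif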
5691
5692/* nic load/unload */
5693
5694/*
34f80b04 5695 * General service functions
a2fbb9ea
ET
5696 */
5697
5698/* send a NIG loopback debug packet */
5699static void bnx2x_lb_pckt(struct bnx2x *bp)
5700{
a2fbb9ea 5701 u32 wb_write[3];
a2fbb9ea
ET
5702
5703 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5704 wb_write[0] = 0x55555555;
5705 wb_write[1] = 0x55555555;
34f80b04 5706 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5707 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5708
5709 /* NON-IP protocol */
a2fbb9ea
ET
5710 wb_write[0] = 0x09000000;
5711 wb_write[1] = 0x55555555;
34f80b04 5712 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5713 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5714}
5715
5716/* some of the internal memories
5717 * are not directly readable from the driver;
5718 * to test them we send debug packets
5719 */
5720static int bnx2x_int_mem_test(struct bnx2x *bp)
5721{
5722 int factor;
5723 int count, i;
5724 u32 val = 0;
5725
ad8d3948 5726 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5727 factor = 120;
ad8d3948
EG
5728 else if (CHIP_REV_IS_EMUL(bp))
5729 factor = 200;
5730 else
a2fbb9ea 5731 factor = 1;
a2fbb9ea
ET
5732
5733 DP(NETIF_MSG_HW, "start part1\n");
5734
5735 /* Disable inputs of parser neighbor blocks */
5736 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5737 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5738 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5739 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5740
5741 /* Write 0 to parser credits for CFC search request */
5742 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5743
5744 /* send Ethernet packet */
5745 bnx2x_lb_pckt(bp);
5746
5747 /* TODO: do we need to reset the NIG statistics? */
5748 /* Wait until NIG register shows 1 packet of size 0x10 */
5749 count = 1000 * factor;
5750 while (count) {
34f80b04 5751
a2fbb9ea
ET
5752 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5753 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5754 if (val == 0x10)
5755 break;
5756
5757 msleep(10);
5758 count--;
5759 }
5760 if (val != 0x10) {
5761 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5762 return -1;
5763 }
5764
5765 /* Wait until PRS register shows 1 packet */
5766 count = 1000 * factor;
5767 while (count) {
5768 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5769 if (val == 1)
5770 break;
5771
5772 msleep(10);
5773 count--;
5774 }
5775 if (val != 0x1) {
5776 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5777 return -2;
5778 }
5779
5780 /* Reset and init BRB, PRS */
34f80b04 5781 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5782 msleep(50);
34f80b04 5783 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5784 msleep(50);
94a78b79
VZ
5785 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5786 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5787
5788 DP(NETIF_MSG_HW, "part2\n");
5789
5790 /* Disable inputs of parser neighbor blocks */
5791 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5792 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5793 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5794 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5795
5796 /* Write 0 to parser credits for CFC search request */
5797 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5798
5799 /* send 10 Ethernet packets */
5800 for (i = 0; i < 10; i++)
5801 bnx2x_lb_pckt(bp);
5802
5803 /* Wait until NIG register shows 10 + 1
5804 packets of size 11*0x10 = 0xb0 */
5805 count = 1000 * factor;
5806 while (count) {
34f80b04 5807
a2fbb9ea
ET
5808 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5809 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5810 if (val == 0xb0)
5811 break;
5812
5813 msleep(10);
5814 count--;
5815 }
5816 if (val != 0xb0) {
5817 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5818 return -3;
5819 }
5820
5821 /* Wait until PRS register shows 2 packets */
5822 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5823 if (val != 2)
5824 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5825
5826 /* Write 1 to parser credits for CFC search request */
5827 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5828
5829 /* Wait until PRS register shows 3 packets */
5830 msleep(10 * factor);
5831 /* the PRS packet counter should now show 3 packets */
5832 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5833 if (val != 3)
5834 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5835
5836 /* clear NIG EOP FIFO */
5837 for (i = 0; i < 11; i++)
5838 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5839 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5840 if (val != 1) {
5841 BNX2X_ERR("clear of NIG failed\n");
5842 return -4;
5843 }
5844
5845 /* Reset and init BRB, PRS, NIG */
5846 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5847 msleep(50);
5848 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5849 msleep(50);
94a78b79
VZ
5850 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5851 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5852#ifndef BCM_ISCSI
5853 /* set NIC mode */
5854 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5855#endif
5856
5857 /* Enable inputs of parser neighbor blocks */
5858 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5859 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5860 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5861 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5862
5863 DP(NETIF_MSG_HW, "done\n");
5864
5865 return 0; /* OK */
5866}
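
/*
 * Editor's note: the wait loops above all follow one pattern -- poll a
 * register until it reads an expected value or a (factor-scaled) timeout
 * expires.  A hypothetical helper in the same style, for illustration:
 */
static int sketch_poll_reg(struct bnx2x *bp, u32 reg, u32 expected,
			   int count, int interval_ms)
{
	while (count-- > 0) {
		if (REG_RD(bp, reg) == expected)
			return 0;
		msleep(interval_ms);
	}
	return -ETIMEDOUT;
}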
5867
5868static void enable_blocks_attention(struct bnx2x *bp)
5869{
5870 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5871 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5872 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5873 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5874 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5875 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5876 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5877 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5878 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5879/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5880/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5881 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5882 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5883 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5884/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5885/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5886 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5887 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5888 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5889 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5890/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5891/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5892 if (CHIP_REV_IS_FPGA(bp))
5893 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5894 else
5895 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5896 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5897 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5898 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5899/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5900/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5901 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5902 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5903/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5904 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5905}
5906
34f80b04 5907
81f75bbf
EG
5908static void bnx2x_reset_common(struct bnx2x *bp)
5909{
5910 /* reset_common */
5911 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5912 0xd3ffff7f);
5913 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5914}
5915
fd4ef40d
EG
5916
5917static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5918{
5919 u32 val;
5920 u8 port;
5921 u8 is_required = 0;
5922
5923 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5924 SHARED_HW_CFG_FAN_FAILURE_MASK;
5925
5926 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5927 is_required = 1;
5928
5929 /*
5930 * The fan failure mechanism is usually related to the PHY type since
5931 * the power consumption of the board is affected by the PHY. Currently,
5932 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5933 */
5934 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5935 for (port = PORT_0; port < PORT_MAX; port++) {
5936 u32 phy_type =
5937 SHMEM_RD(bp, dev_info.port_hw_config[port].
5938 external_phy_config) &
5939 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5940 is_required |=
5941 ((phy_type ==
5942 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
5943 (phy_type ==
5944 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
5945 (phy_type ==
5946 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5947 }
5948
5949 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5950
5951 if (is_required == 0)
5952 return;
5953
5954 /* Fan failure is indicated by SPIO 5 */
5955 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5956 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5957
5958 /* set to active low mode */
5959 val = REG_RD(bp, MISC_REG_SPIO_INT);
5960 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5961 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5962 REG_WR(bp, MISC_REG_SPIO_INT, val);
5963
5964 /* enable interrupt to signal the IGU */
5965 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5966 val |= (1 << MISC_REGISTERS_SPIO_5);
5967 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5968}
5969
34f80b04 5970static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5971{
a2fbb9ea 5972 u32 val, i;
a2fbb9ea 5973
34f80b04 5974 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5975
81f75bbf 5976 bnx2x_reset_common(bp);
34f80b04
EG
5977 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5978 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5979
94a78b79 5980 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
5981 if (CHIP_IS_E1H(bp))
5982 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5983
34f80b04
EG
5984 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5985 msleep(30);
5986 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5987
94a78b79 5988 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5989 if (CHIP_IS_E1(bp)) {
5990 /* enable HW interrupt from PXP on USDM overflow
5991 bit 16 on INT_MASK_0 */
5992 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5993 }
a2fbb9ea 5994
94a78b79 5995 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5996 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5997
5998#ifdef __BIG_ENDIAN
34f80b04
EG
5999 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6000 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6001 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6002 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6003 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6004 /* make sure this value is 0 */
6005 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6006
6007/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6008 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6009 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6010 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6011 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6012#endif
6013
34f80b04 6014 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 6015#ifdef BCM_ISCSI
34f80b04
EG
6016 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6017 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6018 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6019#endif
6020
34f80b04
EG
6021 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6022 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6023
34f80b04
EG
6024 /* let the HW do its magic ... */
6025 msleep(100);
6026 /* finish PXP init */
6027 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6028 if (val != 1) {
6029 BNX2X_ERR("PXP2 CFG failed\n");
6030 return -EBUSY;
6031 }
6032 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6033 if (val != 1) {
6034 BNX2X_ERR("PXP2 RD_INIT failed\n");
6035 return -EBUSY;
6036 }
a2fbb9ea 6037
34f80b04
EG
6038 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6039 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6040
94a78b79 6041 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6042
34f80b04
EG
6043 /* clean the DMAE memory */
6044 bp->dmae_ready = 1;
6045 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6046
94a78b79
VZ
6047 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6048 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6049 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6050 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6051
34f80b04
EG
6052 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6053 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6054 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6055 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6056
94a78b79 6057 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
6058 /* soft reset pulse */
6059 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6060 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
6061
6062#ifdef BCM_ISCSI
94a78b79 6063 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6064#endif
a2fbb9ea 6065
94a78b79 6066 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6067 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6068 if (!CHIP_REV_IS_SLOW(bp)) {
6069 /* enable hw interrupt from doorbell Q */
6070 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6071 }
a2fbb9ea 6072
94a78b79
VZ
6073 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6074 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6075 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
6076 /* set NIC mode */
6077 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
6078 if (CHIP_IS_E1H(bp))
6079 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6080
94a78b79
VZ
6081 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6082 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6083 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6084 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6085
ca00392c
EG
6086 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6087 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6088 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6089 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6090
94a78b79
VZ
6091 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6092 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6093 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6094 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6095
34f80b04
EG
6096 /* sync semi rtc */
6097 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6098 0x80000000);
6099 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6100 0x80000000);
a2fbb9ea 6101
94a78b79
VZ
6102 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6103 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6104 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6105
34f80b04
EG
6106 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6107 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6108 REG_WR(bp, i, 0xc0cac01a);
6109 /* TODO: replace with something meaningful */
6110 }
94a78b79 6111 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 6112 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6113
34f80b04
EG
6114 if (sizeof(union cdu_context) != 1024)
6115 /* we currently assume that a context is 1024 bytes */
6116 printk(KERN_ALERT PFX "please adjust the size of"
6117 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6118
94a78b79 6119 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6120 val = (4 << 24) + (0 << 12) + 1024;
6121 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6122
94a78b79 6123 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6124 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6125 /* enable context validation interrupt from CFC */
6126 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6127
6128 /* set the thresholds to prevent CFC/CDU race */
6129 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6130
94a78b79
VZ
6131 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6132 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6133
94a78b79 6134 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6135 /* Reset PCIE errors for debug */
6136 REG_WR(bp, 0x2814, 0xffffffff);
6137 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6138
94a78b79 6139 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6140 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6141 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6142 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6143
94a78b79 6144 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6145 if (CHIP_IS_E1H(bp)) {
6146 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6147 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6148 }
6149
6150 if (CHIP_REV_IS_SLOW(bp))
6151 msleep(200);
6152
6153 /* finish CFC init */
6154 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6155 if (val != 1) {
6156 BNX2X_ERR("CFC LL_INIT failed\n");
6157 return -EBUSY;
6158 }
6159 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6160 if (val != 1) {
6161 BNX2X_ERR("CFC AC_INIT failed\n");
6162 return -EBUSY;
6163 }
6164 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6165 if (val != 1) {
6166 BNX2X_ERR("CFC CAM_INIT failed\n");
6167 return -EBUSY;
6168 }
6169 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6170
34f80b04
EG
6171 /* read NIG statistic
6172 to see if this is our first load since power-up */
6173 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6174 val = *bnx2x_sp(bp, wb_data[0]);
6175
6176 /* do internal memory self test */
6177 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6178 BNX2X_ERR("internal mem self test failed\n");
6179 return -EBUSY;
6180 }
6181
35b19ba5 6182 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6183 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6184 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6185 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6186 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6187 bp->port.need_hw_lock = 1;
6188 break;
6189
34f80b04
EG
6190 default:
6191 break;
6192 }
f1410647 6193
fd4ef40d
EG
6194 bnx2x_setup_fan_failure_detection(bp);
6195
34f80b04
EG
6196 /* clear PXP2 attentions */
6197 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6198
34f80b04 6199 enable_blocks_attention(bp);
a2fbb9ea 6200
6bbca910
YR
6201 if (!BP_NOMCP(bp)) {
6202 bnx2x_acquire_phy_lock(bp);
6203 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6204 bnx2x_release_phy_lock(bp);
6205 } else
6206 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6207
34f80b04
EG
6208 return 0;
6209}
a2fbb9ea 6210
34f80b04
EG
6211static int bnx2x_init_port(struct bnx2x *bp)
6212{
6213 int port = BP_PORT(bp);
94a78b79 6214 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6215 u32 low, high;
34f80b04 6216 u32 val;
a2fbb9ea 6217
34f80b04
EG
6218 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6219
6220 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6221
94a78b79 6222 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6223 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6224
6225 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6226 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6227 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
a2fbb9ea
ET
6228#ifdef BCM_ISCSI
6229 /* Port0 1
6230 * Port1 385 */
6231 i++;
6232 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6233 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6234 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6235 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6236
6237 /* Port0 2
6238 * Port1 386 */
6239 i++;
6240 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6241 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6242 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6243 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6244
6245 /* Port0 3
6246 * Port1 387 */
6247 i++;
6248 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6249 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6250 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6251 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6252#endif
94a78b79 6253 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6254
a2fbb9ea
ET
6255#ifdef BCM_ISCSI
6256 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6257 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6258
94a78b79 6259 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea 6260#endif
94a78b79 6261 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6262
94a78b79 6263 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6264 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6265 /* no pause for emulation and FPGA */
6266 low = 0;
6267 high = 513;
6268 } else {
6269 if (IS_E1HMF(bp))
6270 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6271 else if (bp->dev->mtu > 4096) {
6272 if (bp->flags & ONE_PORT_FLAG)
6273 low = 160;
6274 else {
6275 val = bp->dev->mtu;
6276 /* (24*1024 + val*4)/256 */
6277 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6278 }
6279 } else
6280 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6281 high = low + 56; /* 14*1024/256 */
6282 }
6283 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6284 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
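
/*
 * Editor's note: worked example (assumed mtu) for the pause thresholds
 * above.  The BRB counts in 256-byte blocks, and "low" reserves 24KB plus
 * four MTU-sized buffers, rounded up -- i.e. (24*1024 + mtu*4)/256:
 *   mtu = 9000:  low = 96 + 9000/64 + 1 = 237 blocks
 *                (24576 + 36000)/256 = 236.6 -> 237
 *                high = 237 + 56 = 293 blocks (14KB above low)
 */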
6285
6286
94a78b79 6287 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6288
94a78b79 6289 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6290 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6291 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6292 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6293
94a78b79
VZ
6294 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6295 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6296 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6297 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6298
94a78b79 6299 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6300 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6301
94a78b79 6302 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6303
6304 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6305 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6306
6307 /* update threshold */
34f80b04 6308 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6309 /* update init credit */
34f80b04 6310 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6311
6312 /* probe changes */
34f80b04 6313 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6314 msleep(5);
34f80b04 6315 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
6316
6317#ifdef BCM_ISCSI
6318 /* tell the searcher where the T2 table is */
6319 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6320
6321 wb_write[0] = U64_LO(bp->t2_mapping);
6322 wb_write[1] = U64_HI(bp->t2_mapping);
6323 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6324 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6325 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6326 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6327
6328 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
a2fbb9ea 6329#endif
94a78b79 6330 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6331 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6332
6333 if (CHIP_IS_E1(bp)) {
6334 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6335 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6336 }
94a78b79 6337 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6338
94a78b79 6339 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6340 /* init aeu_mask_attn_func_0/1:
6341 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6342 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6343 * bits 4-7 are used for "per vn group attention" */
6344 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6345 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6346
94a78b79 6347 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6348 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6349 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6350 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6351 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6352
94a78b79 6353 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6354
6355 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6356
6357 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6358 /* 0x2 disable e1hov, 0x1 enable */
6359 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6360 (IS_E1HMF(bp) ? 0x1 : 0x2));
6361
1c06328c
EG
6362 /* support pause requests from USDM, TSDM and BRB */
6363 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6364
6365 {
6366 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6367 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6368 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6369 }
34f80b04
EG
6370 }
6371
94a78b79 6372 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6373 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6374
35b19ba5 6375 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6376 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6377 {
6378 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6379
6380 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6381 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6382
6383 /* The GPIO should be swapped if the swap register is
6384 set and active */
6385 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6386 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6387
6388 /* Select function upon port-swap configuration */
6389 if (port == 0) {
6390 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6391 aeu_gpio_mask = (swap_val && swap_override) ?
6392 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6393 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6394 } else {
6395 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6396 aeu_gpio_mask = (swap_val && swap_override) ?
6397 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6398 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6399 }
6400 val = REG_RD(bp, offset);
6401 /* add GPIO3 to group */
6402 val |= aeu_gpio_mask;
6403 REG_WR(bp, offset, val);
6404 }
6405 break;
6406
35b19ba5 6407 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6408 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6409 /* add SPIO 5 to group 0 */
4d295db0
EG
6410 {
6411 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6412 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6413 val = REG_RD(bp, reg_addr);
f1410647 6414 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6415 REG_WR(bp, reg_addr, val);
6416 }
f1410647
ET
6417 break;
6418
6419 default:
6420 break;
6421 }
6422
c18487ee 6423 bnx2x__link_reset(bp);
a2fbb9ea 6424
34f80b04
EG
6425 return 0;
6426}
6427
6428#define ILT_PER_FUNC (768/2)
6429#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6430/* the phys address is shifted right 12 bits and a
6431 1=valid bit is added at the 53rd bit;
6432 then, since this is a wide register(TM),
6433 we split it into two 32-bit writes
6434 */
6435#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6436#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6437#define PXP_ONE_ILT(x) (((x) << 10) | x)
6438#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6439
6440#define CNIC_ILT_LINES 0
6441
6442static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6443{
6444 int reg;
6445
6446 if (CHIP_IS_E1H(bp))
6447 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6448 else /* E1 */
6449 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6450
6451 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6452}
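
/*
 * Editor's note: worked example (assumed address) for the ONCHIP_ADDR
 * split above.  For addr = 0x123456000 (page aligned, below 2^44):
 *   ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x00123456
 *   ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44)  = 0x00100000
 * so the valid bit (bit 52 of the combined wide value) is bit 20 of the
 * high word, and the upper address bits (>> 44) sit below it.
 */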
6453
6454static int bnx2x_init_func(struct bnx2x *bp)
6455{
6456 int port = BP_PORT(bp);
6457 int func = BP_FUNC(bp);
8badd27a 6458 u32 addr, val;
34f80b04
EG
6459 int i;
6460
6461 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6462
8badd27a
EG
6463 /* set MSI reconfigure capability */
6464 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6465 val = REG_RD(bp, addr);
6466 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6467 REG_WR(bp, addr, val);
6468
34f80b04
EG
6469 i = FUNC_ILT_BASE(func);
6470
6471 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6472 if (CHIP_IS_E1H(bp)) {
6473 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6474 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6475 } else /* E1 */
6476 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6477 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6478
6479
6480 if (CHIP_IS_E1H(bp)) {
6481 for (i = 0; i < 9; i++)
6482 bnx2x_init_block(bp,
94a78b79 6483 cm_blocks[i], FUNC0_STAGE + func);
34f80b04
EG
6484
6485 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6486 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6487 }
6488
6489 /* HC init per function */
6490 if (CHIP_IS_E1H(bp)) {
6491 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6492
6493 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6494 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6495 }
94a78b79 6496 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6497
c14423fe 6498 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6499 REG_WR(bp, 0x2114, 0xffffffff);
6500 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6501
34f80b04
EG
6502 return 0;
6503}
6504
6505static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6506{
6507 int i, rc = 0;
a2fbb9ea 6508
34f80b04
EG
6509 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6510 BP_FUNC(bp), load_code);
a2fbb9ea 6511
34f80b04
EG
6512 bp->dmae_ready = 0;
6513 mutex_init(&bp->dmae_mutex);
6514 bnx2x_gunzip_init(bp);
a2fbb9ea 6515
34f80b04
EG
6516 switch (load_code) {
6517 case FW_MSG_CODE_DRV_LOAD_COMMON:
6518 rc = bnx2x_init_common(bp);
6519 if (rc)
6520 goto init_hw_err;
6521 /* no break */
6522
6523 case FW_MSG_CODE_DRV_LOAD_PORT:
6524 bp->dmae_ready = 1;
6525 rc = bnx2x_init_port(bp);
6526 if (rc)
6527 goto init_hw_err;
6528 /* no break */
6529
6530 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6531 bp->dmae_ready = 1;
6532 rc = bnx2x_init_func(bp);
6533 if (rc)
6534 goto init_hw_err;
6535 break;
6536
6537 default:
6538 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6539 break;
6540 }
6541
6542 if (!BP_NOMCP(bp)) {
6543 int func = BP_FUNC(bp);
a2fbb9ea
ET
6544
6545 bp->fw_drv_pulse_wr_seq =
34f80b04 6546 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6547 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6548 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6549 }
a2fbb9ea 6550
34f80b04
EG
6551 /* this needs to be done before gunzip end */
6552 bnx2x_zero_def_sb(bp);
6553 for_each_queue(bp, i)
6554 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6555
6556init_hw_err:
6557 bnx2x_gunzip_end(bp);
6558
6559 return rc;
a2fbb9ea
ET
6560}
6561
a2fbb9ea
ET
6562static void bnx2x_free_mem(struct bnx2x *bp)
6563{
6564
6565#define BNX2X_PCI_FREE(x, y, size) \
6566 do { \
6567 if (x) { \
6568 pci_free_consistent(bp->pdev, size, x, y); \
6569 x = NULL; \
6570 y = 0; \
6571 } \
6572 } while (0)
6573
6574#define BNX2X_FREE(x) \
6575 do { \
6576 if (x) { \
6577 vfree(x); \
6578 x = NULL; \
6579 } \
6580 } while (0)
6581
6582 int i;
6583
6584 /* fastpath */
555f6c78 6585 /* Common */
a2fbb9ea
ET
6586 for_each_queue(bp, i) {
6587
555f6c78 6588 /* status blocks */
a2fbb9ea
ET
6589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6590 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6591 sizeof(struct host_status_block));
555f6c78
EG
6592 }
6593 /* Rx */
6594 for_each_rx_queue(bp, i) {
a2fbb9ea 6595
555f6c78 6596 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6597 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6598 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6599 bnx2x_fp(bp, i, rx_desc_mapping),
6600 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6601
6602 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6603 bnx2x_fp(bp, i, rx_comp_mapping),
6604 sizeof(struct eth_fast_path_rx_cqe) *
6605 NUM_RCQ_BD);
a2fbb9ea 6606
7a9b2557 6607 /* SGE ring */
32626230 6608 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6609 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6610 bnx2x_fp(bp, i, rx_sge_mapping),
6611 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6612 }
555f6c78
EG
6613 /* Tx */
6614 for_each_tx_queue(bp, i) {
6615
6616 /* fastpath tx rings: tx_buf tx_desc */
6617 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6618 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6619 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6620 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6621 }
a2fbb9ea
ET
6622 /* end of fastpath */
6623
6624 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6625 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6626
6627 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6628 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6629
6630#ifdef BCM_ISCSI
6631 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6632 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6633 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6634 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6635#endif
7a9b2557 6636 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6637
6638#undef BNX2X_PCI_FREE
6639#undef BNX2X_FREE
6640}
6641
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

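/* Drain helper for the shutdown path: each Tx ring is walked from the
 * packet consumer up to the producer and every outstanding packet is
 * released via bnx2x_free_tx_pkt(); the queues are assumed to be already
 * stopped at this point.
 */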
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}

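/* MSI-X vector layout: entry 0 carries the slowpath (default status block)
 * interrupt and entries 1..n map one-to-one onto the fastpath queues,
 * which is why the loops here index the table with "i + offset".
 */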
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

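/* bp->intr_sem acts as an interrupt-disable nesting count:
 * bnx2x_netif_start() below only re-enables NAPI and HW interrupts once
 * atomic_dec_and_test() brings the count down to zero.
 */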
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */
}

/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

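/* Ramrods posted via bnx2x_sp_post() complete asynchronously; the
 * completion handler (bnx2x_sp_event()) updates the state word that
 * bnx2x_wait_ramrod() watches. In poll mode the Rx rings are serviced
 * by hand since interrupts may not be operational yet.
 */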
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

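/* Queue-count selection in bnx2x_set_int_mode_msix() below: when the
 * num_rx_queues/num_tx_queues module parameters are 0, the counts default
 * to num_online_cpus(); either way they are capped at BNX2X_MAX_QUEUES(bp)
 * and Tx is clamped to no more than Rx.
 */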
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must not be more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  "  defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}

static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}

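/* Load sequence, roughly: pick an interrupt mode, allocate rings and
 * request IRQs, run the LOAD_REQ/LOAD_DONE handshake with the MCP
 * (management firmware) to learn whether common, port or only function
 * init is our responsibility, init the HW accordingly, then open the
 * leading and non-default clients with SETUP ramrods.
 */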
/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

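/* Closing a non-default client is a two-stage handshake: a HALT ramrod
 * stops the connection, then a CFC_DEL ramrod releases its connection
 * context; each stage is waited for synchronously (poll mode).
 */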
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

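/* Teardown mirrors bnx2x_nic_load(): quiesce the Rx filter and netif
 * layer, drain the Tx rings, invalidate the CAM entries (or program WoL
 * match registers), close the clients with HALT/CFC_DEL/PORT_DEL ramrods
 * and finally reset the chip to the depth the MCP unload response
 * dictates.
 */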
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

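/* Scheduled (e.g. from the Tx timeout handler) to bounce the NIC with a
 * full unload/load cycle under rtnl_lock.
 */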
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

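/* UNDI is the pre-boot (PXE) driver. If it left the device initialized
 * (MISC_REG_UNPREPARED set and the doorbell CID offset at 0x7), it is
 * unloaded via the MCP and the chip is reset before this driver takes
 * over; input traffic is closed first and the NIG port-swap straps are
 * preserved across the reset.
 */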
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

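/* bp->port.supported is built in two passes below: first the union of
 * modes the external PHY can do, then masked down by the NVRAM
 * speed_capability_mask for the port.
 */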
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

34f80b04 8295static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8296{
c18487ee 8297 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8298
34f80b04 8299 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8300 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8301 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8302 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8303 bp->port.advertising = bp->port.supported;
a2fbb9ea 8304 } else {
c18487ee
YR
8305 u32 ext_phy_type =
8306 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8307
8308 if ((ext_phy_type ==
8309 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8310 (ext_phy_type ==
8311 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8312 /* force 10G, no AN */
c18487ee 8313 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8314 bp->port.advertising =
a2fbb9ea
ET
8315 (ADVERTISED_10000baseT_Full |
8316 ADVERTISED_FIBRE);
8317 break;
8318 }
8319 BNX2X_ERR("NVRAM config error. "
8320 "Invalid link_config 0x%x"
8321 " Autoneg not supported\n",
34f80b04 8322 bp->port.link_config);
a2fbb9ea
ET
8323 return;
8324 }
8325 break;
8326
8327 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8328 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8329 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
8330 bp->port.advertising = (ADVERTISED_10baseT_Full |
8331 ADVERTISED_TP);
a2fbb9ea
ET
8332 } else {
8333 BNX2X_ERR("NVRAM config error. "
8334 "Invalid link_config 0x%x"
8335 " speed_cap_mask 0x%x\n",
34f80b04 8336 bp->port.link_config,
c18487ee 8337 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8338 return;
8339 }
8340 break;
8341
8342 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8343 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8344 bp->link_params.req_line_speed = SPEED_10;
8345 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8346 bp->port.advertising = (ADVERTISED_10baseT_Half |
8347 ADVERTISED_TP);
a2fbb9ea
ET
8348 } else {
8349 BNX2X_ERR("NVRAM config error. "
8350 "Invalid link_config 0x%x"
8351 " speed_cap_mask 0x%x\n",
34f80b04 8352 bp->port.link_config,
c18487ee 8353 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8354 return;
8355 }
8356 break;
8357
8358 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8359 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8360 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8361 bp->port.advertising = (ADVERTISED_100baseT_Full |
8362 ADVERTISED_TP);
a2fbb9ea
ET
8363 } else {
8364 BNX2X_ERR("NVRAM config error. "
8365 "Invalid link_config 0x%x"
8366 " speed_cap_mask 0x%x\n",
34f80b04 8367 bp->port.link_config,
c18487ee 8368 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8369 return;
8370 }
8371 break;
8372
8373 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8374 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8375 bp->link_params.req_line_speed = SPEED_100;
8376 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8377 bp->port.advertising = (ADVERTISED_100baseT_Half |
8378 ADVERTISED_TP);
a2fbb9ea
ET
8379 } else {
8380 BNX2X_ERR("NVRAM config error. "
8381 "Invalid link_config 0x%x"
8382 " speed_cap_mask 0x%x\n",
34f80b04 8383 bp->port.link_config,
c18487ee 8384 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8385 return;
8386 }
8387 break;
8388
8389 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8390 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8391 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8392 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8393 ADVERTISED_TP);
a2fbb9ea
ET
8394 } else {
8395 BNX2X_ERR("NVRAM config error. "
8396 "Invalid link_config 0x%x"
8397 " speed_cap_mask 0x%x\n",
34f80b04 8398 bp->port.link_config,
c18487ee 8399 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8400 return;
8401 }
8402 break;
8403
8404 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8405 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8406 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8407 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8408 ADVERTISED_TP);
a2fbb9ea
ET
8409 } else {
8410 BNX2X_ERR("NVRAM config error. "
8411 "Invalid link_config 0x%x"
8412 " speed_cap_mask 0x%x\n",
34f80b04 8413 bp->port.link_config,
c18487ee 8414 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8415 return;
8416 }
8417 break;
8418
8419 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8420 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8421 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8422 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8423 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8424 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8425 ADVERTISED_FIBRE);
a2fbb9ea
ET
8426 } else {
8427 BNX2X_ERR("NVRAM config error. "
8428 "Invalid link_config 0x%x"
8429 " speed_cap_mask 0x%x\n",
34f80b04 8430 bp->port.link_config,
c18487ee 8431 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8432 return;
8433 }
8434 break;
8435
8436 default:
8437 BNX2X_ERR("NVRAM config error. "
8438 "BAD link speed link_config 0x%x\n",
34f80b04 8439 bp->port.link_config);
c18487ee 8440 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8441 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8442 break;
8443 }
a2fbb9ea 8444
34f80b04
EG
8445 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8446 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8447 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8448 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8449 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8450
c18487ee 8451 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8452 " advertising 0x%x\n",
c18487ee
YR
8453 bp->link_params.req_line_speed,
8454 bp->link_params.req_duplex,
34f80b04 8455 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8456}
8457
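/*
 * Editor's sketch (standalone, not part of the driver): the tail of
 * bnx2x_link_settings_requested() above masks the requested flow
 * control out of link_config and downgrades AUTO to NONE when the
 * port cannot autonegotiate.  The constants below are simplified
 * stand-ins for the real PORT_FEATURE_*/BNX2X_FLOW_CTRL_* values.
 */
#include <stdio.h>

#define FC_MASK  0x700		/* hypothetical flow-control field */
#define FC_AUTO  0x000
#define FC_NONE  0x400
#define SUP_ANEG 0x1		/* hypothetical SUPPORTED_Autoneg bit */

static unsigned int req_flow_ctrl(unsigned int link_config,
				  unsigned int supported)
{
	unsigned int fc = link_config & FC_MASK;

	/* AUTO is meaningless without autoneg support: fall back to NONE */
	if (fc == FC_AUTO && !(supported & SUP_ANEG))
		fc = FC_NONE;
	return fc;
}

int main(void)
{
	printf("0x%x\n", req_flow_ctrl(FC_AUTO, 0));	/* 0x400 (NONE) */
	return 0;
}
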
34f80b04 8458static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8459{
34f80b04
EG
8460 int port = BP_PORT(bp);
8461 u32 val, val2;
589abe3a 8462 u32 config;
c2c8b03e 8463 u16 i;
01cd4528 8464 u32 ext_phy_type;
a2fbb9ea 8465
c18487ee 8466 bp->link_params.bp = bp;
34f80b04 8467 bp->link_params.port = port;
c18487ee 8468
c18487ee 8469 bp->link_params.lane_config =
a2fbb9ea 8470 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 8471 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8472 SHMEM_RD(bp,
8473 dev_info.port_hw_config[port].external_phy_config);
4d295db0
EG
8474 /* BCM8727_NOC => BCM8727 no over current */
8475 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8476 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8477 bp->link_params.ext_phy_config &=
8478 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8479 bp->link_params.ext_phy_config |=
8480 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8481 bp->link_params.feature_config_flags |=
8482 FEATURE_CONFIG_BCM8727_NOC;
8483 }
8484
c18487ee 8485 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8486 SHMEM_RD(bp,
8487 dev_info.port_hw_config[port].speed_capability_mask);
8488
34f80b04 8489 bp->port.link_config =
a2fbb9ea
ET
8490 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8491
c2c8b03e
EG
8492 /* Get the 4 lanes xgxs config rx and tx */
8493 for (i = 0; i < 2; i++) {
8494 val = SHMEM_RD(bp,
8495 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8496 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8497 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8498
8499 val = SHMEM_RD(bp,
8500 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8501 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8502 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8503 }
8504
3ce2c3f9
EG
8505 /* If the device is capable of WoL, set the default state according
8506 * to the HW
8507 */
4d295db0 8508 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8509 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8510 (config & PORT_FEATURE_WOL_ENABLED));
8511
c2c8b03e
EG
8512 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8513 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8514 bp->link_params.lane_config,
8515 bp->link_params.ext_phy_config,
34f80b04 8516 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8517
4d295db0
EG
8518 bp->link_params.switch_cfg |= (bp->port.link_config &
8519 PORT_FEATURE_CONNECTED_SWITCH_MASK);
c18487ee 8520 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8521
8522 bnx2x_link_settings_requested(bp);
8523
01cd4528
EG
8524 /*
 8525 * If connected directly, work with the internal PHY; otherwise, work
8526 * with the external PHY
8527 */
8528 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8529 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8530 bp->mdio.prtad = bp->link_params.phy_addr;
8531
8532 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8533 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8534 bp->mdio.prtad =
8535 (bp->link_params.ext_phy_config &
8536 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8537 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8538
a2fbb9ea
ET
8539 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8540 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8541 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8542 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8543 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8544 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8545 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8546 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8547 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8548 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8549}
8550
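/*
 * Editor's sketch (standalone): bnx2x_get_port_hwinfo() above rebuilds
 * the station MAC from two 32-bit shmem words, mac_upper holding the
 * top two octets and mac_lower the bottom four.  The byte shifts below
 * mirror the dev_addr[] assignments in the function.
 */
#include <stdio.h>

static void mac_from_shmem(unsigned int upper, unsigned int lower,
			   unsigned char mac[6])
{
	mac[0] = (unsigned char)(upper >> 8);
	mac[1] = (unsigned char)(upper);
	mac[2] = (unsigned char)(lower >> 24);
	mac[3] = (unsigned char)(lower >> 16);
	mac[4] = (unsigned char)(lower >> 8);
	mac[5] = (unsigned char)(lower);
}

int main(void)
{
	unsigned char mac[6];

	mac_from_shmem(0x00001122, 0x33445566, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 11:22:33:44:55:66 */
	return 0;
}
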
8551static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8552{
8553 int func = BP_FUNC(bp);
8554 u32 val, val2;
8555 int rc = 0;
a2fbb9ea 8556
34f80b04 8557 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8558
34f80b04
EG
8559 bp->e1hov = 0;
8560 bp->e1hmf = 0;
8561 if (CHIP_IS_E1H(bp)) {
8562 bp->mf_config =
8563 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8564
2691d51d 8565 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
3196a88a 8566 FUNC_MF_CFG_E1HOV_TAG_MASK);
2691d51d 8567 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
34f80b04 8568 bp->e1hmf = 1;
2691d51d
EG
8569 BNX2X_DEV_INFO("%s function mode\n",
8570 IS_E1HMF(bp) ? "multi" : "single");
8571
8572 if (IS_E1HMF(bp)) {
8573 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8574 e1hov_tag) &
8575 FUNC_MF_CFG_E1HOV_TAG_MASK);
8576 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8577 bp->e1hov = val;
8578 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8579 "(0x%04x)\n",
8580 func, bp->e1hov, bp->e1hov);
8581 } else {
34f80b04
EG
8582 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8583 " aborting\n", func);
8584 rc = -EPERM;
8585 }
2691d51d
EG
8586 } else {
8587 if (BP_E1HVN(bp)) {
8588 BNX2X_ERR("!!! VN %d in single function mode,"
8589 " aborting\n", BP_E1HVN(bp));
8590 rc = -EPERM;
8591 }
34f80b04
EG
8592 }
8593 }
a2fbb9ea 8594
34f80b04
EG
8595 if (!BP_NOMCP(bp)) {
8596 bnx2x_get_port_hwinfo(bp);
8597
8598 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8599 DRV_MSG_SEQ_NUMBER_MASK);
8600 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8601 }
8602
8603 if (IS_E1HMF(bp)) {
8604 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8605 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8606 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8607 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8608 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8609 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8610 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8611 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8612 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8613 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8614 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8615 ETH_ALEN);
8616 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8617 ETH_ALEN);
a2fbb9ea 8618 }
34f80b04
EG
8619
8620 return rc;
a2fbb9ea
ET
8621 }
8622
34f80b04
EG
8623 if (BP_NOMCP(bp)) {
8624 /* only supposed to happen on emulation/FPGA */
33471629 8625 BNX2X_ERR("warning: random MAC workaround active\n");
34f80b04
EG
8626 random_ether_addr(bp->dev->dev_addr);
8627 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8628 }
a2fbb9ea 8629
34f80b04
EG
8630 return rc;
8631}
8632
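/*
 * Editor's sketch (standalone): the multi-function probe above treats
 * a non-default outer-VLAN (E1HOV) tag in function 0's config as "the
 * board is in multi-function mode", then requires a valid tag for the
 * current function.  The mask and default below are illustrative, not
 * the real FUNC_MF_CFG_* values.
 */
#include <stdio.h>

#define E1HOV_TAG_MASK    0xffff	/* hypothetical */
#define E1HOV_TAG_DEFAULT 0xffff	/* hypothetical "unset" pattern */

static int e1hov_tag(unsigned int cfg, int *tag)
{
	unsigned int val = cfg & E1HOV_TAG_MASK;

	if (val == E1HOV_TAG_DEFAULT)
		return -1;	/* no valid tag configured */
	*tag = (int)val;
	return 0;
}

int main(void)
{
	int tag;

	if (e1hov_tag(0x00000064, &tag) == 0)
		printf("E1HOV tag %d\n", tag);	/* 100 */
	return 0;
}
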
8633static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8634{
8635 int func = BP_FUNC(bp);
87942b46 8636 int timer_interval;
34f80b04
EG
8637 int rc;
8638
da5a662a
VZ
8639 /* Disable interrupt handling until HW is initialized */
8640 atomic_set(&bp->intr_sem, 1);
e1510706 8641 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8642
34f80b04 8643 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8644
1cf167f2 8645 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8646 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8647
8648 rc = bnx2x_get_hwinfo(bp);
8649
8650 /* need to reset chip if undi was active */
8651 if (!BP_NOMCP(bp))
8652 bnx2x_undi_unload(bp);
8653
8654 if (CHIP_REV_IS_FPGA(bp))
8655 printk(KERN_ERR PFX "FPGA detected\n");
8656
8657 if (BP_NOMCP(bp) && (func == 0))
8658 printk(KERN_ERR PFX
8659 "MCP disabled, must load devices in order!\n");
8660
555f6c78 8661 /* Set multi queue mode */
8badd27a
EG
8662 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8663 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8664 printk(KERN_ERR PFX
8badd27a 8665 "Multi queue disabled since the requested int_mode is not MSI-X\n");
555f6c78
EG
8666 multi_mode = ETH_RSS_MODE_DISABLED;
8667 }
8668 bp->multi_mode = multi_mode;
8669
8670
7a9b2557
VZ
8671 /* Set TPA flags */
8672 if (disable_tpa) {
8673 bp->flags &= ~TPA_ENABLE_FLAG;
8674 bp->dev->features &= ~NETIF_F_LRO;
8675 } else {
8676 bp->flags |= TPA_ENABLE_FLAG;
8677 bp->dev->features |= NETIF_F_LRO;
8678 }
8679
8d5726c4 8680 bp->mrrs = mrrs;
7a9b2557 8681
34f80b04
EG
8682 bp->tx_ring_size = MAX_TX_AVAIL;
8683 bp->rx_ring_size = MAX_RX_AVAIL;
8684
8685 bp->rx_csum = 1;
34f80b04
EG
8686
8687 bp->tx_ticks = 50;
8688 bp->rx_ticks = 25;
8689
87942b46
EG
8690 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8691 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8692
8693 init_timer(&bp->timer);
8694 bp->timer.expires = jiffies + bp->current_interval;
8695 bp->timer.data = (unsigned long) bp;
8696 bp->timer.function = bnx2x_timer;
8697
8698 return rc;
a2fbb9ea
ET
8699}
8700
8701/*
8702 * ethtool service functions
8703 */
8704
8705/* All ethtool functions called with rtnl_lock */
8706
8707static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8708{
8709 struct bnx2x *bp = netdev_priv(dev);
8710
34f80b04
EG
8711 cmd->supported = bp->port.supported;
8712 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8713
8714 if (netif_carrier_ok(dev)) {
c18487ee
YR
8715 cmd->speed = bp->link_vars.line_speed;
8716 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8717 } else {
c18487ee
YR
8718 cmd->speed = bp->link_params.req_line_speed;
8719 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8720 }
34f80b04
EG
8721 if (IS_E1HMF(bp)) {
8722 u16 vn_max_rate;
8723
8724 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8725 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8726 if (vn_max_rate < cmd->speed)
8727 cmd->speed = vn_max_rate;
8728 }
a2fbb9ea 8729
c18487ee
YR
8730 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8731 u32 ext_phy_type =
8732 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8733
8734 switch (ext_phy_type) {
8735 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8736 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8737 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8738 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8740 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 8741 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647
ET
8742 cmd->port = PORT_FIBRE;
8743 break;
8744
8745 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8746 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8747 cmd->port = PORT_TP;
8748 break;
8749
c18487ee
YR
8750 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8751 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8752 bp->link_params.ext_phy_config);
8753 break;
8754
f1410647
ET
8755 default:
8756 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8757 bp->link_params.ext_phy_config);
8758 break;
f1410647
ET
8759 }
8760 } else
a2fbb9ea 8761 cmd->port = PORT_TP;
a2fbb9ea 8762
01cd4528 8763 cmd->phy_address = bp->mdio.prtad;
a2fbb9ea
ET
8764 cmd->transceiver = XCVR_INTERNAL;
8765
c18487ee 8766 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8767 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8768 else
a2fbb9ea 8769 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8770
8771 cmd->maxtxpkt = 0;
8772 cmd->maxrxpkt = 0;
8773
8774 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8775 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8776 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8777 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8778 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8779 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8780 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8781
8782 return 0;
8783}
8784
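/*
 * Editor's sketch (standalone): in multi-function mode the per-VN
 * bandwidth field of mf_config caps the speed reported by
 * bnx2x_get_settings() above; the field counts in units of 100 Mbps.
 * The mask/shift values here are placeholders for FUNC_MF_CFG_MAX_BW_*.
 */
#include <stdio.h>

#define MAX_BW_MASK  0xff000000u	/* hypothetical */
#define MAX_BW_SHIFT 24			/* hypothetical */

static unsigned int reported_speed(unsigned int line_speed,
				   unsigned int mf_config)
{
	unsigned int vn_max_rate =
		((mf_config & MAX_BW_MASK) >> MAX_BW_SHIFT) * 100;

	return (vn_max_rate < line_speed) ? vn_max_rate : line_speed;
}

int main(void)
{
	/* 10G link, VN limited to 25 * 100 = 2500 Mbps */
	printf("%u\n", reported_speed(10000, 25u << MAX_BW_SHIFT));
	return 0;
}
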
8785static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8786{
8787 struct bnx2x *bp = netdev_priv(dev);
8788 u32 advertising;
8789
34f80b04
EG
8790 if (IS_E1HMF(bp))
8791 return 0;
8792
a2fbb9ea
ET
8793 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8794 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8795 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8796 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8797 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8798 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8799 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8800
a2fbb9ea 8801 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8802 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8803 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8804 return -EINVAL;
f1410647 8805 }
a2fbb9ea
ET
8806
8807 /* advertise the requested speed and duplex if supported */
34f80b04 8808 cmd->advertising &= bp->port.supported;
a2fbb9ea 8809
c18487ee
YR
8810 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8811 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8812 bp->port.advertising |= (ADVERTISED_Autoneg |
8813 cmd->advertising);
a2fbb9ea
ET
8814
8815 } else { /* forced speed */
8816 /* advertise the requested speed and duplex if supported */
8817 switch (cmd->speed) {
8818 case SPEED_10:
8819 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8820 if (!(bp->port.supported &
f1410647
ET
8821 SUPPORTED_10baseT_Full)) {
8822 DP(NETIF_MSG_LINK,
8823 "10M full not supported\n");
a2fbb9ea 8824 return -EINVAL;
f1410647 8825 }
a2fbb9ea
ET
8826
8827 advertising = (ADVERTISED_10baseT_Full |
8828 ADVERTISED_TP);
8829 } else {
34f80b04 8830 if (!(bp->port.supported &
f1410647
ET
8831 SUPPORTED_10baseT_Half)) {
8832 DP(NETIF_MSG_LINK,
8833 "10M half not supported\n");
a2fbb9ea 8834 return -EINVAL;
f1410647 8835 }
a2fbb9ea
ET
8836
8837 advertising = (ADVERTISED_10baseT_Half |
8838 ADVERTISED_TP);
8839 }
8840 break;
8841
8842 case SPEED_100:
8843 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8844 if (!(bp->port.supported &
f1410647
ET
8845 SUPPORTED_100baseT_Full)) {
8846 DP(NETIF_MSG_LINK,
8847 "100M full not supported\n");
a2fbb9ea 8848 return -EINVAL;
f1410647 8849 }
a2fbb9ea
ET
8850
8851 advertising = (ADVERTISED_100baseT_Full |
8852 ADVERTISED_TP);
8853 } else {
34f80b04 8854 if (!(bp->port.supported &
f1410647
ET
8855 SUPPORTED_100baseT_Half)) {
8856 DP(NETIF_MSG_LINK,
8857 "100M half not supported\n");
a2fbb9ea 8858 return -EINVAL;
f1410647 8859 }
a2fbb9ea
ET
8860
8861 advertising = (ADVERTISED_100baseT_Half |
8862 ADVERTISED_TP);
8863 }
8864 break;
8865
8866 case SPEED_1000:
f1410647
ET
8867 if (cmd->duplex != DUPLEX_FULL) {
8868 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8869 return -EINVAL;
f1410647 8870 }
a2fbb9ea 8871
34f80b04 8872 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8873 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8874 return -EINVAL;
f1410647 8875 }
a2fbb9ea
ET
8876
8877 advertising = (ADVERTISED_1000baseT_Full |
8878 ADVERTISED_TP);
8879 break;
8880
8881 case SPEED_2500:
f1410647
ET
8882 if (cmd->duplex != DUPLEX_FULL) {
8883 DP(NETIF_MSG_LINK,
8884 "2.5G half not supported\n");
a2fbb9ea 8885 return -EINVAL;
f1410647 8886 }
a2fbb9ea 8887
34f80b04 8888 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8889 DP(NETIF_MSG_LINK,
8890 "2.5G full not supported\n");
a2fbb9ea 8891 return -EINVAL;
f1410647 8892 }
a2fbb9ea 8893
f1410647 8894 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8895 ADVERTISED_TP);
8896 break;
8897
8898 case SPEED_10000:
f1410647
ET
8899 if (cmd->duplex != DUPLEX_FULL) {
8900 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8901 return -EINVAL;
f1410647 8902 }
a2fbb9ea 8903
34f80b04 8904 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8905 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8906 return -EINVAL;
f1410647 8907 }
a2fbb9ea
ET
8908
8909 advertising = (ADVERTISED_10000baseT_Full |
8910 ADVERTISED_FIBRE);
8911 break;
8912
8913 default:
f1410647 8914 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8915 return -EINVAL;
8916 }
8917
c18487ee
YR
8918 bp->link_params.req_line_speed = cmd->speed;
8919 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8920 bp->port.advertising = advertising;
a2fbb9ea
ET
8921 }
8922
c18487ee 8923 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8924 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8925 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8926 bp->port.advertising);
a2fbb9ea 8927
34f80b04 8928 if (netif_running(dev)) {
bb2a0f7a 8929 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8930 bnx2x_link_set(bp);
8931 }
a2fbb9ea
ET
8932
8933 return 0;
8934}
8935
c18487ee
YR
8936#define PHY_FW_VER_LEN 10
8937
a2fbb9ea
ET
8938static void bnx2x_get_drvinfo(struct net_device *dev,
8939 struct ethtool_drvinfo *info)
8940{
8941 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8942 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8943
8944 strcpy(info->driver, DRV_MODULE_NAME);
8945 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8946
8947 phy_fw_ver[0] = '\0';
34f80b04 8948 if (bp->port.pmf) {
4a37fb66 8949 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8950 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8951 (bp->state != BNX2X_STATE_CLOSED),
8952 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8953 bnx2x_release_phy_lock(bp);
34f80b04 8954 }
c18487ee 8955
f0e53a84
EG
8956 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8957 (bp->common.bc_ver & 0xff0000) >> 16,
8958 (bp->common.bc_ver & 0xff00) >> 8,
8959 (bp->common.bc_ver & 0xff),
8960 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8961 strcpy(info->bus_info, pci_name(bp->pdev));
8962 info->n_stats = BNX2X_NUM_STATS;
8963 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8964 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8965 info->regdump_len = 0;
8966}
8967
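/*
 * Editor's sketch (standalone): bnx2x_get_drvinfo() above unpacks the
 * bootcode version from one 32-bit word, one version component per
 * byte, and formats it as "BC:x.y.z".  The sample word below is an
 * arbitrary value chosen for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bc_ver = 0x030405;	/* hypothetical bootcode word */
	char fw_version[32];

	snprintf(fw_version, sizeof(fw_version), "BC:%d.%d.%d",
		 (int)((bc_ver & 0xff0000) >> 16),
		 (int)((bc_ver & 0xff00) >> 8),
		 (int)(bc_ver & 0xff));
	printf("%s\n", fw_version);	/* BC:3.4.5 */
	return 0;
}
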
0a64ea57
EG
8968#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8969#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8970
8971static int bnx2x_get_regs_len(struct net_device *dev)
8972{
8973 static u32 regdump_len;
8974 struct bnx2x *bp = netdev_priv(dev);
8975 int i;
8976
8977 if (regdump_len)
8978 return regdump_len;
8979
8980 if (CHIP_IS_E1(bp)) {
8981 for (i = 0; i < REGS_COUNT; i++)
8982 if (IS_E1_ONLINE(reg_addrs[i].info))
8983 regdump_len += reg_addrs[i].size;
8984
8985 for (i = 0; i < WREGS_COUNT_E1; i++)
8986 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8987 regdump_len += wreg_addrs_e1[i].size *
8988 (1 + wreg_addrs_e1[i].read_regs_count);
8989
8990 } else { /* E1H */
8991 for (i = 0; i < REGS_COUNT; i++)
8992 if (IS_E1H_ONLINE(reg_addrs[i].info))
8993 regdump_len += reg_addrs[i].size;
8994
8995 for (i = 0; i < WREGS_COUNT_E1H; i++)
8996 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8997 regdump_len += wreg_addrs_e1h[i].size *
8998 (1 + wreg_addrs_e1h[i].read_regs_count);
8999 }
9000 regdump_len *= 4;
9001 regdump_len += sizeof(struct dump_hdr);
9002
9003 return regdump_len;
9004}
9005
9006static void bnx2x_get_regs(struct net_device *dev,
9007 struct ethtool_regs *regs, void *_p)
9008{
9009 u32 *p = _p, i, j;
9010 struct bnx2x *bp = netdev_priv(dev);
9011 struct dump_hdr dump_hdr = {0};
9012
9013 regs->version = 0;
9014 memset(p, 0, regs->len);
9015
9016 if (!netif_running(bp->dev))
9017 return;
9018
9019 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9020 dump_hdr.dump_sign = dump_sign_all;
9021 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9022 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9023 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9024 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9025 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9026
9027 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9028 p += dump_hdr.hdr_size + 1;
9029
9030 if (CHIP_IS_E1(bp)) {
9031 for (i = 0; i < REGS_COUNT; i++)
9032 if (IS_E1_ONLINE(reg_addrs[i].info))
9033 for (j = 0; j < reg_addrs[i].size; j++)
9034 *p++ = REG_RD(bp,
9035 reg_addrs[i].addr + j*4);
9036
9037 } else { /* E1H */
9038 for (i = 0; i < REGS_COUNT; i++)
9039 if (IS_E1H_ONLINE(reg_addrs[i].info))
9040 for (j = 0; j < reg_addrs[i].size; j++)
9041 *p++ = REG_RD(bp,
9042 reg_addrs[i].addr + j*4);
9043 }
9044}
9045
a2fbb9ea
ET
9046static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9047{
9048 struct bnx2x *bp = netdev_priv(dev);
9049
9050 if (bp->flags & NO_WOL_FLAG) {
9051 wol->supported = 0;
9052 wol->wolopts = 0;
9053 } else {
9054 wol->supported = WAKE_MAGIC;
9055 if (bp->wol)
9056 wol->wolopts = WAKE_MAGIC;
9057 else
9058 wol->wolopts = 0;
9059 }
9060 memset(&wol->sopass, 0, sizeof(wol->sopass));
9061}
9062
9063static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9064{
9065 struct bnx2x *bp = netdev_priv(dev);
9066
9067 if (wol->wolopts & ~WAKE_MAGIC)
9068 return -EINVAL;
9069
9070 if (wol->wolopts & WAKE_MAGIC) {
9071 if (bp->flags & NO_WOL_FLAG)
9072 return -EINVAL;
9073
9074 bp->wol = 1;
34f80b04 9075 } else
a2fbb9ea 9076 bp->wol = 0;
34f80b04 9077
a2fbb9ea
ET
9078 return 0;
9079}
9080
9081static u32 bnx2x_get_msglevel(struct net_device *dev)
9082{
9083 struct bnx2x *bp = netdev_priv(dev);
9084
9085 return bp->msglevel;
9086}
9087
9088static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9089{
9090 struct bnx2x *bp = netdev_priv(dev);
9091
9092 if (capable(CAP_NET_ADMIN))
9093 bp->msglevel = level;
9094}
9095
9096static int bnx2x_nway_reset(struct net_device *dev)
9097{
9098 struct bnx2x *bp = netdev_priv(dev);
9099
34f80b04
EG
9100 if (!bp->port.pmf)
9101 return 0;
a2fbb9ea 9102
34f80b04 9103 if (netif_running(dev)) {
bb2a0f7a 9104 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9105 bnx2x_link_set(bp);
9106 }
a2fbb9ea
ET
9107
9108 return 0;
9109}
9110
01e53298
NO
9111static u32
9112bnx2x_get_link(struct net_device *dev)
9113{
9114 struct bnx2x *bp = netdev_priv(dev);
9115
9116 return bp->link_vars.link_up;
9117}
9118
a2fbb9ea
ET
9119static int bnx2x_get_eeprom_len(struct net_device *dev)
9120{
9121 struct bnx2x *bp = netdev_priv(dev);
9122
34f80b04 9123 return bp->common.flash_size;
a2fbb9ea
ET
9124}
9125
9126static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
9127{
34f80b04 9128 int port = BP_PORT(bp);
a2fbb9ea
ET
9129 int count, i;
9130 u32 val = 0;
9131
9132 /* adjust timeout for emulation/FPGA */
9133 count = NVRAM_TIMEOUT_COUNT;
9134 if (CHIP_REV_IS_SLOW(bp))
9135 count *= 100;
9136
9137 /* request access to nvram interface */
9138 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9139 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
9140
9141 for (i = 0; i < count*10; i++) {
9142 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9143 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
9144 break;
9145
9146 udelay(5);
9147 }
9148
9149 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 9150 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
9151 return -EBUSY;
9152 }
9153
9154 return 0;
9155}
9156
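/*
 * Editor's sketch (standalone): bnx2x_acquire_nvram_lock() above is a
 * request-then-poll arbiter: set the port's request bit, then spin
 * (udelay(5) per iteration, with the iteration budget scaled up 100x
 * on slow emulation/FPGA parts) until the arbiter grants the bit.
 * reg_rd()/reg_wr() below are stubs standing in for REG_RD/REG_WR,
 * and the stub "hardware" grants the request immediately.
 */
#include <stdio.h>

static unsigned int fake_reg;				/* pretend hardware */
static unsigned int reg_rd(void) { return fake_reg; }
static void reg_wr(unsigned int v) { fake_reg |= v; }	/* grant at once */

static int acquire(unsigned int grant_bit, int count)
{
	int i;

	reg_wr(grant_bit);			/* request access */
	for (i = 0; i < count * 10; i++) {
		if (reg_rd() & grant_bit)
			return 0;		/* granted */
		/* udelay(5) in the real driver */
	}
	return -1;				/* -EBUSY equivalent */
}

int main(void)
{
	printf("%d\n", acquire(0x1, 10));	/* 0: lock acquired */
	return 0;
}
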
9157static int bnx2x_release_nvram_lock(struct bnx2x *bp)
9158{
34f80b04 9159 int port = BP_PORT(bp);
a2fbb9ea
ET
9160 int count, i;
9161 u32 val = 0;
9162
9163 /* adjust timeout for emulation/FPGA */
9164 count = NVRAM_TIMEOUT_COUNT;
9165 if (CHIP_REV_IS_SLOW(bp))
9166 count *= 100;
9167
9168 /* relinquish nvram interface */
9169 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9170 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
9171
9172 for (i = 0; i < count*10; i++) {
9173 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
9174 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
9175 break;
9176
9177 udelay(5);
9178 }
9179
9180 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 9181 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
9182 return -EBUSY;
9183 }
9184
9185 return 0;
9186}
9187
9188static void bnx2x_enable_nvram_access(struct bnx2x *bp)
9189{
9190 u32 val;
9191
9192 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9193
9194 /* enable both bits, even on read */
9195 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9196 (val | MCPR_NVM_ACCESS_ENABLE_EN |
9197 MCPR_NVM_ACCESS_ENABLE_WR_EN));
9198}
9199
9200static void bnx2x_disable_nvram_access(struct bnx2x *bp)
9201{
9202 u32 val;
9203
9204 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
9205
9206 /* disable both bits, even after read */
9207 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
9208 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
9209 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
9210}
9211
4781bfad 9212static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
a2fbb9ea
ET
9213 u32 cmd_flags)
9214{
f1410647 9215 int count, i, rc;
a2fbb9ea
ET
9216 u32 val;
9217
9218 /* build the command word */
9219 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
9220
9221 /* need to clear DONE bit separately */
9222 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9223
9224 /* address of the NVRAM to read from */
9225 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9226 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9227
9228 /* issue a read command */
9229 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9230
9231 /* adjust timeout for emulation/FPGA */
9232 count = NVRAM_TIMEOUT_COUNT;
9233 if (CHIP_REV_IS_SLOW(bp))
9234 count *= 100;
9235
9236 /* wait for completion */
9237 *ret_val = 0;
9238 rc = -EBUSY;
9239 for (i = 0; i < count; i++) {
9240 udelay(5);
9241 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9242
9243 if (val & MCPR_NVM_COMMAND_DONE) {
9244 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
 9245 /* we read nvram data in cpu order,
 9246 * but ethtool sees it as an array of bytes;
 9247 * converting to big-endian does the work */
4781bfad 9248 *ret_val = cpu_to_be32(val);
a2fbb9ea
ET
9249 rc = 0;
9250 break;
9251 }
9252 }
9253
9254 return rc;
9255}
9256
9257static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
9258 int buf_size)
9259{
9260 int rc;
9261 u32 cmd_flags;
4781bfad 9262 __be32 val;
a2fbb9ea
ET
9263
9264 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9265 DP(BNX2X_MSG_NVM,
c14423fe 9266 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9267 offset, buf_size);
9268 return -EINVAL;
9269 }
9270
34f80b04
EG
9271 if (offset + buf_size > bp->common.flash_size) {
9272 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9273 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9274 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9275 return -EINVAL;
9276 }
9277
9278 /* request access to nvram interface */
9279 rc = bnx2x_acquire_nvram_lock(bp);
9280 if (rc)
9281 return rc;
9282
9283 /* enable access to nvram interface */
9284 bnx2x_enable_nvram_access(bp);
9285
9286 /* read the first word(s) */
9287 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9288 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9289 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9290 memcpy(ret_buf, &val, 4);
9291
9292 /* advance to the next dword */
9293 offset += sizeof(u32);
9294 ret_buf += sizeof(u32);
9295 buf_size -= sizeof(u32);
9296 cmd_flags = 0;
9297 }
9298
9299 if (rc == 0) {
9300 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9301 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9302 memcpy(ret_buf, &val, 4);
9303 }
9304
9305 /* disable access to nvram interface */
9306 bnx2x_disable_nvram_access(bp);
9307 bnx2x_release_nvram_lock(bp);
9308
9309 return rc;
9310}
9311
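/*
 * Editor's sketch (standalone): bnx2x_nvram_read() above streams the
 * buffer one dword at a time, tagging only the first access with a
 * FIRST flag and only the final one with a LAST flag so the NVRAM
 * controller can bracket the burst.  read_dword() is a stub here and
 * the flag values are illustrative.
 */
#include <stdio.h>
#include <string.h>

#define CMD_FIRST 0x1	/* illustrative flag values */
#define CMD_LAST  0x2

static int read_dword(unsigned int off, unsigned int *val,
		      unsigned int flags)
{
	*val = off;		/* stub: "read" returns the offset */
	printf("off 0x%x flags 0x%x\n", off, flags);
	return 0;
}

static int nvram_read(unsigned int offset, unsigned char *buf, int size)
{
	unsigned int flags = CMD_FIRST, val;
	int rc = 0;

	while (size > 4 && rc == 0) {
		rc = read_dword(offset, &val, flags);
		memcpy(buf, &val, 4);
		offset += 4;
		buf += 4;
		size -= 4;
		flags = 0;
	}
	if (rc == 0) {
		rc = read_dword(offset, &val, flags | CMD_LAST);
		memcpy(buf, &val, 4);
	}
	return rc;
}

int main(void)
{
	unsigned char buf[12];

	return nvram_read(0, buf, sizeof(buf));
}
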
9312static int bnx2x_get_eeprom(struct net_device *dev,
9313 struct ethtool_eeprom *eeprom, u8 *eebuf)
9314{
9315 struct bnx2x *bp = netdev_priv(dev);
9316 int rc;
9317
2add3acb
EG
9318 if (!netif_running(dev))
9319 return -EAGAIN;
9320
34f80b04 9321 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9322 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9323 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9324 eeprom->len, eeprom->len);
9325
9326 /* parameters already validated in ethtool_get_eeprom */
9327
9328 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9329
9330 return rc;
9331}
9332
9333static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9334 u32 cmd_flags)
9335{
f1410647 9336 int count, i, rc;
a2fbb9ea
ET
9337
9338 /* build the command word */
9339 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9340
9341 /* need to clear DONE bit separately */
9342 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9343
9344 /* write the data */
9345 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9346
9347 /* address of the NVRAM to write to */
9348 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9349 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9350
9351 /* issue the write command */
9352 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9353
9354 /* adjust timeout for emulation/FPGA */
9355 count = NVRAM_TIMEOUT_COUNT;
9356 if (CHIP_REV_IS_SLOW(bp))
9357 count *= 100;
9358
9359 /* wait for completion */
9360 rc = -EBUSY;
9361 for (i = 0; i < count; i++) {
9362 udelay(5);
9363 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9364 if (val & MCPR_NVM_COMMAND_DONE) {
9365 rc = 0;
9366 break;
9367 }
9368 }
9369
9370 return rc;
9371}
9372
f1410647 9373#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
9374
9375static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9376 int buf_size)
9377{
9378 int rc;
9379 u32 cmd_flags;
9380 u32 align_offset;
4781bfad 9381 __be32 val;
a2fbb9ea 9382
34f80b04
EG
9383 if (offset + buf_size > bp->common.flash_size) {
9384 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9385 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9386 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9387 return -EINVAL;
9388 }
9389
9390 /* request access to nvram interface */
9391 rc = bnx2x_acquire_nvram_lock(bp);
9392 if (rc)
9393 return rc;
9394
9395 /* enable access to nvram interface */
9396 bnx2x_enable_nvram_access(bp);
9397
9398 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9399 align_offset = (offset & ~0x03);
9400 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9401
9402 if (rc == 0) {
9403 val &= ~(0xff << BYTE_OFFSET(offset));
9404 val |= (*data_buf << BYTE_OFFSET(offset));
9405
 9406 /* nvram data is returned as an array of bytes;
 9407 * convert it back to cpu order */
9408 val = be32_to_cpu(val);
9409
a2fbb9ea
ET
9410 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9411 cmd_flags);
9412 }
9413
9414 /* disable access to nvram interface */
9415 bnx2x_disable_nvram_access(bp);
9416 bnx2x_release_nvram_lock(bp);
9417
9418 return rc;
9419}
9420
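/*
 * Editor's sketch (standalone): bnx2x_nvram_write1() above updates a
 * single byte by reading the aligned dword around it, masking the
 * target byte out with BYTE_OFFSET(), or-ing the new byte in, and
 * writing the dword back.
 */
#include <stdio.h>

#define BYTE_OFFSET(offset) (8 * ((offset) & 0x03))

static unsigned int patch_byte(unsigned int dword, unsigned int offset,
			       unsigned char byte)
{
	dword &= ~(0xffu << BYTE_OFFSET(offset));
	dword |= (unsigned int)byte << BYTE_OFFSET(offset);
	return dword;
}

int main(void)
{
	/* replace byte 2 of 0x44332211 with 0xaa -> 0x44aa2211 */
	printf("0x%08x\n", patch_byte(0x44332211, 2, 0xaa));
	return 0;
}
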
9421static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9422 int buf_size)
9423{
9424 int rc;
9425 u32 cmd_flags;
9426 u32 val;
9427 u32 written_so_far;
9428
34f80b04 9429 if (buf_size == 1) /* ethtool */
a2fbb9ea 9430 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
9431
9432 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 9433 DP(BNX2X_MSG_NVM,
c14423fe 9434 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
9435 offset, buf_size);
9436 return -EINVAL;
9437 }
9438
34f80b04
EG
9439 if (offset + buf_size > bp->common.flash_size) {
9440 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 9441 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 9442 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
9443 return -EINVAL;
9444 }
9445
9446 /* request access to nvram interface */
9447 rc = bnx2x_acquire_nvram_lock(bp);
9448 if (rc)
9449 return rc;
9450
9451 /* enable access to nvram interface */
9452 bnx2x_enable_nvram_access(bp);
9453
9454 written_so_far = 0;
9455 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9456 while ((written_so_far < buf_size) && (rc == 0)) {
9457 if (written_so_far == (buf_size - sizeof(u32)))
9458 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9459 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9460 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9461 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9462 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9463
9464 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
9465
9466 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9467
9468 /* advance to the next dword */
9469 offset += sizeof(u32);
9470 data_buf += sizeof(u32);
9471 written_so_far += sizeof(u32);
9472 cmd_flags = 0;
9473 }
9474
9475 /* disable access to nvram interface */
9476 bnx2x_disable_nvram_access(bp);
9477 bnx2x_release_nvram_lock(bp);
9478
9479 return rc;
9480}
9481
9482static int bnx2x_set_eeprom(struct net_device *dev,
9483 struct ethtool_eeprom *eeprom, u8 *eebuf)
9484{
9485 struct bnx2x *bp = netdev_priv(dev);
f57a6025
EG
9486 int port = BP_PORT(bp);
9487 int rc = 0;
a2fbb9ea 9488
9f4c9583
EG
9489 if (!netif_running(dev))
9490 return -EAGAIN;
9491
34f80b04 9492 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
9493 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9494 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9495 eeprom->len, eeprom->len);
9496
9497 /* parameters already validated in ethtool_set_eeprom */
9498
f57a6025
EG
9499 /* PHY eeprom can be accessed only by the PMF */
9500 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
9501 !bp->port.pmf)
9502 return -EINVAL;
9503
9504 if (eeprom->magic == 0x50485950) {
9505 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
9506 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 9507
f57a6025
EG
9508 bnx2x_acquire_phy_lock(bp);
9509 rc |= bnx2x_link_reset(&bp->link_params,
9510 &bp->link_vars, 0);
9511 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9512 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
9513 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9514 MISC_REGISTERS_GPIO_HIGH, port);
9515 bnx2x_release_phy_lock(bp);
9516 bnx2x_link_report(bp);
9517
9518 } else if (eeprom->magic == 0x50485952) {
9519 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9520 if ((bp->state == BNX2X_STATE_OPEN) ||
9521 (bp->state == BNX2X_STATE_DISABLED)) {
4a37fb66 9522 bnx2x_acquire_phy_lock(bp);
f57a6025
EG
9523 rc |= bnx2x_link_reset(&bp->link_params,
9524 &bp->link_vars, 1);
9525
9526 rc |= bnx2x_phy_init(&bp->link_params,
9527 &bp->link_vars);
4a37fb66 9528 bnx2x_release_phy_lock(bp);
f57a6025
EG
9529 bnx2x_calc_fc_adv(bp);
9530 }
9531 } else if (eeprom->magic == 0x53985943) {
9532 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
9533 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9534 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
9535 u8 ext_phy_addr =
9536 (bp->link_params.ext_phy_config &
9537 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
9538 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
9539
9540 /* DSP Remove Download Mode */
9541 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
9542 MISC_REGISTERS_GPIO_LOW, port);
34f80b04 9543
f57a6025
EG
9544 bnx2x_acquire_phy_lock(bp);
9545
9546 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
9547
9548 /* wait 0.5 sec to allow it to run */
9549 msleep(500);
9550 bnx2x_ext_phy_hw_reset(bp, port);
9551 msleep(500);
9552 bnx2x_release_phy_lock(bp);
9553 }
9554 } else
c18487ee 9555 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
9556
9557 return rc;
9558}
9559
9560static int bnx2x_get_coalesce(struct net_device *dev,
9561 struct ethtool_coalesce *coal)
9562{
9563 struct bnx2x *bp = netdev_priv(dev);
9564
9565 memset(coal, 0, sizeof(struct ethtool_coalesce));
9566
9567 coal->rx_coalesce_usecs = bp->rx_ticks;
9568 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
9569
9570 return 0;
9571}
9572
ca00392c 9573#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
a2fbb9ea
ET
9574static int bnx2x_set_coalesce(struct net_device *dev,
9575 struct ethtool_coalesce *coal)
9576{
9577 struct bnx2x *bp = netdev_priv(dev);
9578
9579 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
ca00392c
EG
9580 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9581 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea
ET
9582
9583 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
ca00392c
EG
9584 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9585 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
a2fbb9ea 9586
34f80b04 9587 if (netif_running(dev))
a2fbb9ea
ET
9588 bnx2x_update_coalesce(bp);
9589
9590 return 0;
9591}
9592
9593static void bnx2x_get_ringparam(struct net_device *dev,
9594 struct ethtool_ringparam *ering)
9595{
9596 struct bnx2x *bp = netdev_priv(dev);
9597
9598 ering->rx_max_pending = MAX_RX_AVAIL;
9599 ering->rx_mini_max_pending = 0;
9600 ering->rx_jumbo_max_pending = 0;
9601
9602 ering->rx_pending = bp->rx_ring_size;
9603 ering->rx_mini_pending = 0;
9604 ering->rx_jumbo_pending = 0;
9605
9606 ering->tx_max_pending = MAX_TX_AVAIL;
9607 ering->tx_pending = bp->tx_ring_size;
9608}
9609
9610static int bnx2x_set_ringparam(struct net_device *dev,
9611 struct ethtool_ringparam *ering)
9612{
9613 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9614 int rc = 0;
a2fbb9ea
ET
9615
9616 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9617 (ering->tx_pending > MAX_TX_AVAIL) ||
9618 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9619 return -EINVAL;
9620
9621 bp->rx_ring_size = ering->rx_pending;
9622 bp->tx_ring_size = ering->tx_pending;
9623
34f80b04
EG
9624 if (netif_running(dev)) {
9625 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9626 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
9627 }
9628
34f80b04 9629 return rc;
a2fbb9ea
ET
9630}
9631
9632static void bnx2x_get_pauseparam(struct net_device *dev,
9633 struct ethtool_pauseparam *epause)
9634{
9635 struct bnx2x *bp = netdev_priv(dev);
9636
356e2385
EG
9637 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9638 BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9639 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9640
c0700f90
DM
9641 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9642 BNX2X_FLOW_CTRL_RX);
9643 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9644 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9645
9646 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9647 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9648 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9649}
9650
9651static int bnx2x_set_pauseparam(struct net_device *dev,
9652 struct ethtool_pauseparam *epause)
9653{
9654 struct bnx2x *bp = netdev_priv(dev);
9655
34f80b04
EG
9656 if (IS_E1HMF(bp))
9657 return 0;
9658
a2fbb9ea
ET
9659 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9660 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9661 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9662
c0700f90 9663 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9664
f1410647 9665 if (epause->rx_pause)
c0700f90 9666 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9667
f1410647 9668 if (epause->tx_pause)
c0700f90 9669 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9670
c0700f90
DM
9671 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9672 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9673
c18487ee 9674 if (epause->autoneg) {
34f80b04 9675 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9676 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9677 return -EINVAL;
9678 }
a2fbb9ea 9679
c18487ee 9680 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9681 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9682 }
a2fbb9ea 9683
c18487ee
YR
9684 DP(NETIF_MSG_LINK,
9685 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9686
9687 if (netif_running(dev)) {
bb2a0f7a 9688 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9689 bnx2x_link_set(bp);
9690 }
a2fbb9ea
ET
9691
9692 return 0;
9693}
9694
df0f2343
VZ
9695static int bnx2x_set_flags(struct net_device *dev, u32 data)
9696{
9697 struct bnx2x *bp = netdev_priv(dev);
9698 int changed = 0;
9699 int rc = 0;
9700
9701 /* TPA requires Rx CSUM offloading */
9702 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9703 if (!(dev->features & NETIF_F_LRO)) {
9704 dev->features |= NETIF_F_LRO;
9705 bp->flags |= TPA_ENABLE_FLAG;
9706 changed = 1;
9707 }
9708
9709 } else if (dev->features & NETIF_F_LRO) {
9710 dev->features &= ~NETIF_F_LRO;
9711 bp->flags &= ~TPA_ENABLE_FLAG;
9712 changed = 1;
9713 }
9714
9715 if (changed && netif_running(dev)) {
9716 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9717 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9718 }
9719
9720 return rc;
9721}
9722
a2fbb9ea
ET
9723static u32 bnx2x_get_rx_csum(struct net_device *dev)
9724{
9725 struct bnx2x *bp = netdev_priv(dev);
9726
9727 return bp->rx_csum;
9728}
9729
9730static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9731{
9732 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9733 int rc = 0;
a2fbb9ea
ET
9734
9735 bp->rx_csum = data;
df0f2343
VZ
9736
 9737 /* Disable TPA when Rx CSUM is disabled; otherwise all
 9738 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9739 if (!data) {
9740 u32 flags = ethtool_op_get_flags(dev);
9741
9742 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9743 }
9744
9745 return rc;
a2fbb9ea
ET
9746}
9747
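/*
 * Editor's sketch (standalone): bnx2x_set_rx_csum() above encodes a
 * feature dependency: TPA (LRO) relies on hardware Rx checksums, so
 * turning Rx CSUM off must also clear the LRO flag, just as
 * bnx2x_set_flags() refuses to enable LRO while rx_csum is 0.  The
 * feature bit below is illustrative.
 */
#include <stdio.h>

#define F_LRO 0x1	/* illustrative feature bit */

static unsigned int set_rx_csum(unsigned int features, int *rx_csum,
				int data)
{
	*rx_csum = data;
	if (!data)
		features &= ~F_LRO;	/* dependent feature goes too */
	return features;
}

int main(void)
{
	int rx_csum = 1;
	unsigned int features = set_rx_csum(F_LRO, &rx_csum, 0);

	printf("rx_csum %d lro %s\n", rx_csum,
	       (features & F_LRO) ? "on" : "off");	/* rx_csum 0 lro off */
	return 0;
}
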
9748static int bnx2x_set_tso(struct net_device *dev, u32 data)
9749{
755735eb 9750 if (data) {
a2fbb9ea 9751 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9752 dev->features |= NETIF_F_TSO6;
9753 } else {
a2fbb9ea 9754 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9755 dev->features &= ~NETIF_F_TSO6;
9756 }
9757
a2fbb9ea
ET
9758 return 0;
9759}
9760
f3c87cdd 9761static const struct {
a2fbb9ea
ET
9762 char string[ETH_GSTRING_LEN];
9763} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9764 { "register_test (offline)" },
9765 { "memory_test (offline)" },
9766 { "loopback_test (offline)" },
9767 { "nvram_test (online)" },
9768 { "interrupt_test (online)" },
9769 { "link_test (online)" },
d3d4f495 9770 { "idle check (online)" }
a2fbb9ea
ET
9771};
9772
9773static int bnx2x_self_test_count(struct net_device *dev)
9774{
9775 return BNX2X_NUM_TESTS;
9776}
9777
f3c87cdd
YG
9778static int bnx2x_test_registers(struct bnx2x *bp)
9779{
9780 int idx, i, rc = -ENODEV;
9781 u32 wr_val = 0;
9dabc424 9782 int port = BP_PORT(bp);
f3c87cdd
YG
9783 static const struct {
9784 u32 offset0;
9785 u32 offset1;
9786 u32 mask;
9787 } reg_tbl[] = {
9788/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9789 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9790 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9791 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9792 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9793 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9794 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9795 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9796 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9797 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9798/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9799 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9800 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9801 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9802 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9803 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9804 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9805 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
f3c87cdd 9806 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
c1f1a06f
EG
9807 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9808/* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
f3c87cdd
YG
9809 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9810 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9811 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9812 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9813 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9814 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9815 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9816 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
c1f1a06f
EG
9817 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9818/* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
f3c87cdd
YG
9819 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9820 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9821 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9822 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9823 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9824 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9825
9826 { 0xffffffff, 0, 0x00000000 }
9827 };
9828
9829 if (!netif_running(bp->dev))
9830 return rc;
9831
 9832 /* Run the test twice:
 9833 first writing 0x00000000, then writing 0xffffffff */
9834 for (idx = 0; idx < 2; idx++) {
9835
9836 switch (idx) {
9837 case 0:
9838 wr_val = 0;
9839 break;
9840 case 1:
9841 wr_val = 0xffffffff;
9842 break;
9843 }
9844
9845 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9846 u32 offset, mask, save_val, val;
f3c87cdd
YG
9847
9848 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9849 mask = reg_tbl[i].mask;
9850
9851 save_val = REG_RD(bp, offset);
9852
9853 REG_WR(bp, offset, wr_val);
9854 val = REG_RD(bp, offset);
9855
9856 /* Restore the original register's value */
9857 REG_WR(bp, offset, save_val);
9858
 9859 /* verify that the value is as expected */
9860 if ((val & mask) != (wr_val & mask))
9861 goto test_reg_exit;
9862 }
9863 }
9864
9865 rc = 0;
9866
9867test_reg_exit:
9868 return rc;
9869}
9870
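/*
 * Editor's sketch (standalone): bnx2x_test_registers() above walks a
 * table of (offset, per-port stride, writable mask) entries twice,
 * once writing all-zeros and once all-ones; each register is saved,
 * written, read back, compared under its mask, and restored.  A plain
 * array stands in for the chip here, with reserved bits modeled as
 * read-only.
 */
#include <stdio.h>

static unsigned int regs[4];	/* pretend register file */

static int walk_test(unsigned int offset, unsigned int mask)
{
	unsigned int patterns[2] = { 0x00000000, 0xffffffff };
	int idx;

	for (idx = 0; idx < 2; idx++) {
		unsigned int save = regs[offset];
		unsigned int val;

		regs[offset] = patterns[idx] & mask;	/* chip keeps mask */
		val = regs[offset];
		regs[offset] = save;			/* restore */

		if ((val & mask) != (patterns[idx] & mask))
			return -1;			/* stuck bits */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", walk_test(1, 0x000003ff));	/* 0: pass */
	return 0;
}
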
9871static int bnx2x_test_memory(struct bnx2x *bp)
9872{
9873 int i, j, rc = -ENODEV;
9874 u32 val;
9875 static const struct {
9876 u32 offset;
9877 int size;
9878 } mem_tbl[] = {
9879 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9880 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9881 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9882 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9883 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9884 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9885 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9886
9887 { 0xffffffff, 0 }
9888 };
9889 static const struct {
9890 char *name;
9891 u32 offset;
9dabc424
YG
9892 u32 e1_mask;
9893 u32 e1h_mask;
f3c87cdd 9894 } prty_tbl[] = {
9dabc424
YG
9895 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9896 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9897 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9898 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9899 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9900 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9901
9902 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9903 };
9904
9905 if (!netif_running(bp->dev))
9906 return rc;
9907
9908 /* Go through all the memories */
9909 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9910 for (j = 0; j < mem_tbl[i].size; j++)
9911 REG_RD(bp, mem_tbl[i].offset + j*4);
9912
9913 /* Check the parity status */
9914 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9915 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9916 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9917 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9918 DP(NETIF_MSG_HW,
9919 "%s is 0x%x\n", prty_tbl[i].name, val);
9920 goto test_mem_exit;
9921 }
9922 }
9923
9924 rc = 0;
9925
9926test_mem_exit:
9927 return rc;
9928}
9929
f3c87cdd
YG
9930static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9931{
9932 int cnt = 1000;
9933
9934 if (link_up)
9935 while (bnx2x_link_test(bp) && cnt--)
9936 msleep(10);
9937}
9938
9939static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9940{
9941 unsigned int pkt_size, num_pkts, i;
9942 struct sk_buff *skb;
9943 unsigned char *packet;
ca00392c
EG
9944 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9945 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
f3c87cdd
YG
9946 u16 tx_start_idx, tx_idx;
9947 u16 rx_start_idx, rx_idx;
ca00392c 9948 u16 pkt_prod, bd_prod;
f3c87cdd 9949 struct sw_tx_bd *tx_buf;
ca00392c
EG
9950 struct eth_tx_start_bd *tx_start_bd;
9951 struct eth_tx_parse_bd *pbd = NULL;
f3c87cdd
YG
9952 dma_addr_t mapping;
9953 union eth_rx_cqe *cqe;
9954 u8 cqe_fp_flags;
9955 struct sw_rx_bd *rx_buf;
9956 u16 len;
9957 int rc = -ENODEV;
9958
b5bf9068
EG
9959 /* check the loopback mode */
9960 switch (loopback_mode) {
9961 case BNX2X_PHY_LOOPBACK:
9962 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9963 return -EINVAL;
9964 break;
9965 case BNX2X_MAC_LOOPBACK:
f3c87cdd 9966 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9967 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068
EG
9968 break;
9969 default:
f3c87cdd 9970 return -EINVAL;
b5bf9068 9971 }
f3c87cdd 9972
b5bf9068
EG
9973 /* prepare the loopback packet */
9974 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9975 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
f3c87cdd
YG
9976 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9977 if (!skb) {
9978 rc = -ENOMEM;
9979 goto test_loopback_exit;
9980 }
9981 packet = skb_put(skb, pkt_size);
9982 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
ca00392c
EG
9983 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9984 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
f3c87cdd
YG
9985 for (i = ETH_HLEN; i < pkt_size; i++)
9986 packet[i] = (unsigned char) (i & 0xff);
9987
b5bf9068 9988 /* send the loopback packet */
f3c87cdd 9989 num_pkts = 0;
ca00392c
EG
9990 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9991 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd 9992
ca00392c
EG
9993 pkt_prod = fp_tx->tx_pkt_prod++;
9994 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9995 tx_buf->first_bd = fp_tx->tx_bd_prod;
f3c87cdd 9996 tx_buf->skb = skb;
ca00392c 9997 tx_buf->flags = 0;
f3c87cdd 9998
ca00392c
EG
9999 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10000 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
f3c87cdd
YG
10001 mapping = pci_map_single(bp->pdev, skb->data,
10002 skb_headlen(skb), PCI_DMA_TODEVICE);
ca00392c
EG
10003 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10004 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10005 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
10006 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10007 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10008 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10009 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
10010 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
10011
10012 /* turn on parsing and get a BD */
10013 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10014 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
10015
10016 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
f3c87cdd 10017
58f4c4cf
EG
10018 wmb();
10019
ca00392c
EG
10020 fp_tx->tx_db.data.prod += 2;
10021 barrier();
10022 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
f3c87cdd
YG
10023
10024 mmiowb();
10025
10026 num_pkts++;
ca00392c 10027 fp_tx->tx_bd_prod += 2; /* start + pbd */
f3c87cdd
YG
10028 bp->dev->trans_start = jiffies;
10029
10030 udelay(100);
10031
ca00392c 10032 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
f3c87cdd
YG
10033 if (tx_idx != tx_start_idx + num_pkts)
10034 goto test_loopback_exit;
10035
ca00392c 10036 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
f3c87cdd
YG
10037 if (rx_idx != rx_start_idx + num_pkts)
10038 goto test_loopback_exit;
10039
ca00392c 10040 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
f3c87cdd
YG
10041 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
10042 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
10043 goto test_loopback_rx_exit;
10044
10045 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
10046 if (len != pkt_size)
10047 goto test_loopback_rx_exit;
10048
ca00392c 10049 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
f3c87cdd
YG
10050 skb = rx_buf->skb;
10051 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
10052 for (i = ETH_HLEN; i < pkt_size; i++)
10053 if (*(skb->data + i) != (unsigned char) (i & 0xff))
10054 goto test_loopback_rx_exit;
10055
10056 rc = 0;
10057
10058test_loopback_rx_exit:
f3c87cdd 10059
ca00392c
EG
10060 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
10061 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
10062 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
10063 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
f3c87cdd
YG
10064
10065 /* Update producers */
ca00392c
EG
10066 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
10067 fp_rx->rx_sge_prod);
f3c87cdd
YG
10068
10069test_loopback_exit:
10070 bp->link_params.loopback_mode = LOOPBACK_NONE;
10071
10072 return rc;
10073}
10074
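/*
 * Editor's sketch (standalone): bnx2x_run_loopback() above fills the
 * payload past the Ethernet header with the byte pattern (i & 0xff)
 * and, after the frame comes back through the loop, verifies the same
 * pattern byte-for-byte.  ETH_HLEN_SKETCH stands in for ETH_HLEN.
 */
#include <stdio.h>

#define ETH_HLEN_SKETCH 14

static void fill(unsigned char *pkt, unsigned int size)
{
	unsigned int i;

	for (i = ETH_HLEN_SKETCH; i < size; i++)
		pkt[i] = (unsigned char)(i & 0xff);
}

static int verify(const unsigned char *pkt, unsigned int size)
{
	unsigned int i;

	for (i = ETH_HLEN_SKETCH; i < size; i++)
		if (pkt[i] != (unsigned char)(i & 0xff))
			return -1;
	return 0;
}

int main(void)
{
	unsigned char pkt[64] = { 0 };

	fill(pkt, sizeof(pkt));
	printf("%d\n", verify(pkt, sizeof(pkt)));	/* 0: intact */
	return 0;
}
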
10075static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10076{
b5bf9068 10077 int rc = 0, res;
f3c87cdd
YG
10078
10079 if (!netif_running(bp->dev))
10080 return BNX2X_LOOPBACK_FAILED;
10081
f8ef6e44 10082 bnx2x_netif_stop(bp, 1);
3910c8ae 10083 bnx2x_acquire_phy_lock(bp);
f3c87cdd 10084
b5bf9068
EG
10085 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10086 if (res) {
10087 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10088 rc |= BNX2X_PHY_LOOPBACK_FAILED;
f3c87cdd
YG
10089 }
10090
b5bf9068
EG
10091 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10092 if (res) {
10093 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10094 rc |= BNX2X_MAC_LOOPBACK_FAILED;
f3c87cdd
YG
10095 }
10096
3910c8ae 10097 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
10098 bnx2x_netif_start(bp);
10099
10100 return rc;
10101}
10102
10103#define CRC32_RESIDUAL 0xdebb20e3
10104
10105static int bnx2x_test_nvram(struct bnx2x *bp)
10106{
10107 static const struct {
10108 int offset;
10109 int size;
10110 } nvram_tbl[] = {
10111 { 0, 0x14 }, /* bootstrap */
10112 { 0x14, 0xec }, /* dir */
10113 { 0x100, 0x350 }, /* manuf_info */
10114 { 0x450, 0xf0 }, /* feature_info */
10115 { 0x640, 0x64 }, /* upgrade_key_info */
10116 { 0x6a4, 0x64 },
10117 { 0x708, 0x70 }, /* manuf_key_info */
10118 { 0x778, 0x70 },
10119 { 0, 0 }
10120 };
4781bfad 10121 __be32 buf[0x350 / 4];
f3c87cdd
YG
10122 u8 *data = (u8 *)buf;
10123 int i, rc;
10124 u32 magic, csum;
10125
10126 rc = bnx2x_nvram_read(bp, 0, data, 4);
10127 if (rc) {
f5372251 10128 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd
YG
10129 goto test_nvram_exit;
10130 }
10131
10132 magic = be32_to_cpu(buf[0]);
10133 if (magic != 0x669955aa) {
10134 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10135 rc = -ENODEV;
10136 goto test_nvram_exit;
10137 }
10138
10139 for (i = 0; nvram_tbl[i].size; i++) {
10140
10141 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10142 nvram_tbl[i].size);
10143 if (rc) {
10144 DP(NETIF_MSG_PROBE,
f5372251 10145 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd
YG
10146 goto test_nvram_exit;
10147 }
10148
10149 csum = ether_crc_le(nvram_tbl[i].size, data);
10150 if (csum != CRC32_RESIDUAL) {
10151 DP(NETIF_MSG_PROBE,
10152 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10153 rc = -ENODEV;
10154 goto test_nvram_exit;
10155 }
10156 }
10157
10158test_nvram_exit:
10159 return rc;
10160}
10161
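/*
 * Editor's sketch (standalone): bnx2x_test_nvram() above relies on the
 * CRC-32 residual property: if a region ends in its own finalized CRC
 * stored little-endian, running the non-finalized little-endian CRC
 * over the whole region (checksum included) always yields the fixed
 * residual 0xdebb20e3.  crc_le() mirrors the kernel's ether_crc_le()
 * bit loop; the sample payload is arbitrary.
 */
#include <stdio.h>

static unsigned int crc_le(int len, const unsigned char *data)
{
	unsigned int crc = ~0u;

	while (--len >= 0) {
		unsigned char octet = *data++;
		int bit;

		for (bit = 8; --bit >= 0; octet >>= 1) {
			if ((crc ^ octet) & 1) {
				crc >>= 1;
				crc ^= 0xedb88320;
			} else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	unsigned char buf[16] = "nvram region";
	unsigned int crc = ~crc_le(12, buf);	/* finalize over payload */
	int i;

	for (i = 0; i < 4; i++)			/* store little-endian */
		buf[12 + i] = (unsigned char)(crc >> (8 * i));

	/* whole region, checksum included: must hit the residual */
	printf("0x%08x\n", crc_le(16, buf));	/* 0xdebb20e3 */
	return 0;
}
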
10162static int bnx2x_test_intr(struct bnx2x *bp)
10163{
10164 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10165 int i, rc;
10166
10167 if (!netif_running(bp->dev))
10168 return -ENODEV;
10169
8d9c5f34 10170 config->hdr.length = 0;
af246401
EG
10171 if (CHIP_IS_E1(bp))
10172 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10173 else
10174 config->hdr.offset = BP_FUNC(bp);
10175 config->hdr.client_id = bp->fp->cl_id;
10176 config->hdr.reserved1 = 0;
10177
10178 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10179 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10180 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10181 if (rc == 0) {
10182 bp->set_mac_pending++;
10183 for (i = 0; i < 10; i++) {
10184 if (!bp->set_mac_pending)
10185 break;
10186 msleep_interruptible(10);
10187 }
10188 if (i == 10)
10189 rc = -ENODEV;
10190 }
10191
10192 return rc;
10193}
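/*
 * Editor's note (illustrative sketch, not driver code): the interrupt
 * test above is a bounded completion poll -- post one SET_MAC slow-path
 * command, then re-check a flag that the interrupt handler clears, up to
 * 10 times with a 10 ms sleep (~100 ms total) before giving up with
 * -ENODEV. The shape of that wait in isolation:
 */
static int bounded_wait_for_clear(volatile int *pending, int tries)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (!*pending)
			return 0;		/* IRQ path cleared the flag */
		msleep_interruptible(10);	/* give the interrupt time */
	}
	return -ENODEV;				/* no interrupt was seen */
}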
10194
10195static void bnx2x_self_test(struct net_device *dev,
10196 struct ethtool_test *etest, u64 *buf)
10197{
10198 struct bnx2x *bp = netdev_priv(dev);
10199
10200 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10201
10202 if (!netif_running(dev))
10203 return;
10204
10205 /* offline tests are not supported in MF mode */
10206 if (IS_E1HMF(bp))
10207 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10208
10209 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10210 int port = BP_PORT(bp);
10211 u32 val;
10212 u8 link_up;
10213
10214 /* save current value of input enable for TX port IF */
10215 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10216 /* disable input for TX port IF */
10217 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10218
10219 link_up = bp->link_vars.link_up;
10220 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10221 bnx2x_nic_load(bp, LOAD_DIAG);
10222 /* wait until link state is restored */
10223 bnx2x_wait_for_link(bp, link_up);
10224
10225 if (bnx2x_test_registers(bp) != 0) {
10226 buf[0] = 1;
10227 etest->flags |= ETH_TEST_FL_FAILED;
10228 }
10229 if (bnx2x_test_memory(bp) != 0) {
10230 buf[1] = 1;
10231 etest->flags |= ETH_TEST_FL_FAILED;
10232 }
10233 buf[2] = bnx2x_test_loopback(bp, link_up);
10234 if (buf[2] != 0)
10235 etest->flags |= ETH_TEST_FL_FAILED;
10236
10237 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10238
10239 /* restore input for TX port IF */
10240 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10241
10242 bnx2x_nic_load(bp, LOAD_NORMAL);
10243 /* wait until link state is restored */
10244 bnx2x_wait_for_link(bp, link_up);
10245 }
10246 if (bnx2x_test_nvram(bp) != 0) {
10247 buf[3] = 1;
10248 etest->flags |= ETH_TEST_FL_FAILED;
10249 }
10250 if (bnx2x_test_intr(bp) != 0) {
10251 buf[4] = 1;
10252 etest->flags |= ETH_TEST_FL_FAILED;
10253 }
10254 if (bp->port.pmf)
10255 if (bnx2x_link_test(bp) != 0) {
10256 buf[5] = 1;
10257 etest->flags |= ETH_TEST_FL_FAILED;
10258 }
10259
10260#ifdef BNX2X_EXTRA_DEBUG
10261 bnx2x_panic_dump(bp);
10262#endif
10263}
10264
10265static const struct {
10266 long offset;
10267 int size;
10268 u8 string[ETH_GSTRING_LEN];
10269} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10270/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10271 { Q_STATS_OFFSET32(error_bytes_received_hi),
10272 8, "[%d]: rx_error_bytes" },
10273 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10274 8, "[%d]: rx_ucast_packets" },
10275 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10276 8, "[%d]: rx_mcast_packets" },
10277 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10278 8, "[%d]: rx_bcast_packets" },
10279 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10280 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10281 4, "[%d]: rx_phy_ip_err_discards"},
10282 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10283 4, "[%d]: rx_skb_alloc_discard" },
10284 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10285
10286/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10287 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10288 8, "[%d]: tx_packets" }
10289};
10290
10291static const struct {
10292 long offset;
10293 int size;
10294 u32 flags;
10295#define STATS_FLAGS_PORT 1
10296#define STATS_FLAGS_FUNC 2
10297#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
10298 u8 string[ETH_GSTRING_LEN];
10299} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
10300/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10301 8, STATS_FLAGS_BOTH, "rx_bytes" },
10302 { STATS_OFFSET32(error_bytes_received_hi),
10303 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
10304 { STATS_OFFSET32(total_unicast_packets_received_hi),
10305 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
10306 { STATS_OFFSET32(total_multicast_packets_received_hi),
10307 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
10308 { STATS_OFFSET32(total_broadcast_packets_received_hi),
10309 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
10310 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
10311 8, STATS_FLAGS_PORT, "rx_crc_errors" },
10312 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
10313 8, STATS_FLAGS_PORT, "rx_align_errors" },
10314 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10315 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10316 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10317 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10318/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10319 8, STATS_FLAGS_PORT, "rx_fragments" },
10320 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10321 8, STATS_FLAGS_PORT, "rx_jabbers" },
10322 { STATS_OFFSET32(no_buff_discard_hi),
10323 8, STATS_FLAGS_BOTH, "rx_discards" },
10324 { STATS_OFFSET32(mac_filter_discard),
10325 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10326 { STATS_OFFSET32(xxoverflow_discard),
10327 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10328 { STATS_OFFSET32(brb_drop_hi),
10329 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10330 { STATS_OFFSET32(brb_truncate_hi),
10331 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10332 { STATS_OFFSET32(pause_frames_received_hi),
10333 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10334 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10335 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10336 { STATS_OFFSET32(nig_timer_max),
10337 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10338/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10339 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10340 { STATS_OFFSET32(rx_skb_alloc_failed),
10341 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10342 { STATS_OFFSET32(hw_csum_err),
10343 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10344
10345 { STATS_OFFSET32(total_bytes_transmitted_hi),
10346 8, STATS_FLAGS_BOTH, "tx_bytes" },
10347 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10348 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10349 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10350 8, STATS_FLAGS_BOTH, "tx_packets" },
10351 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10352 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10353 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10354 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10355 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10356 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10357 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10358 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10359/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10360 8, STATS_FLAGS_PORT, "tx_deferred" },
10361 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10362 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10363 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10364 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10365 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10366 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10367 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10368 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10369 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10370 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10371 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10372 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10373 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10374 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10375 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10376 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10377 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10378 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10379/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10380 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10381 { STATS_OFFSET32(pause_frames_sent_hi),
10382 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10383};
10384
10385#define IS_PORT_STAT(i) \
10386 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10387#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10388#define IS_E1HMF_MODE_STAT(bp) \
10389 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10390
10391static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10392{
10393 struct bnx2x *bp = netdev_priv(dev);
10394 int i, j, k;
10395
10396 switch (stringset) {
10397 case ETH_SS_STATS:
10398 if (is_multi(bp)) {
10399 k = 0;
10400 for_each_rx_queue(bp, i) {
10401 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10402 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10403 bnx2x_q_stats_arr[j].string, i);
10404 k += BNX2X_NUM_Q_STATS;
10405 }
10406 if (IS_E1HMF_MODE_STAT(bp))
10407 break;
10408 for (j = 0; j < BNX2X_NUM_STATS; j++)
10409 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10410 bnx2x_stats_arr[j].string);
10411 } else {
10412 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10413 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10414 continue;
10415 strcpy(buf + j*ETH_GSTRING_LEN,
10416 bnx2x_stats_arr[i].string);
10417 j++;
10418 }
10419 }
10420 break;
10421
10422 case ETH_SS_TEST:
10423 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10424 break;
10425 }
10426}
10427
10428static int bnx2x_get_stats_count(struct net_device *dev)
10429{
10430 struct bnx2x *bp = netdev_priv(dev);
10431 int i, num_stats;
10432
10433 if (is_multi(bp)) {
10434 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10435 if (!IS_E1HMF_MODE_STAT(bp))
10436 num_stats += BNX2X_NUM_STATS;
10437 } else {
10438 if (IS_E1HMF_MODE_STAT(bp)) {
10439 num_stats = 0;
10440 for (i = 0; i < BNX2X_NUM_STATS; i++)
10441 if (IS_FUNC_STAT(i))
10442 num_stats++;
10443 } else
10444 num_stats = BNX2X_NUM_STATS;
10445 }
10446
10447 return num_stats;
10448}
10449
10450static void bnx2x_get_ethtool_stats(struct net_device *dev,
10451 struct ethtool_stats *stats, u64 *buf)
10452{
10453 struct bnx2x *bp = netdev_priv(dev);
10454 u32 *hw_stats, *offset;
10455 int i, j, k;
10456
10457 if (is_multi(bp)) {
10458 k = 0;
10459 for_each_rx_queue(bp, i) {
10460 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10461 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10462 if (bnx2x_q_stats_arr[j].size == 0) {
10463 /* skip this counter */
10464 buf[k + j] = 0;
10465 continue;
10466 }
10467 offset = (hw_stats +
10468 bnx2x_q_stats_arr[j].offset);
10469 if (bnx2x_q_stats_arr[j].size == 4) {
10470 /* 4-byte counter */
10471 buf[k + j] = (u64) *offset;
10472 continue;
10473 }
10474 /* 8-byte counter */
10475 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10476 }
10477 k += BNX2X_NUM_Q_STATS;
10478 }
10479 if (IS_E1HMF_MODE_STAT(bp))
10480 return;
10481 hw_stats = (u32 *)&bp->eth_stats;
10482 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10483 if (bnx2x_stats_arr[j].size == 0) {
10484 /* skip this counter */
10485 buf[k + j] = 0;
10486 continue;
10487 }
10488 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10489 if (bnx2x_stats_arr[j].size == 4) {
10490 /* 4-byte counter */
10491 buf[k + j] = (u64) *offset;
10492 continue;
10493 }
10494 /* 8-byte counter */
10495 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10496 }
10497 } else {
10498 hw_stats = (u32 *)&bp->eth_stats;
10499 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10500 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10501 continue;
10502 if (bnx2x_stats_arr[i].size == 0) {
10503 /* skip this counter */
10504 buf[j] = 0;
10505 j++;
10506 continue;
10507 }
10508 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10509 if (bnx2x_stats_arr[i].size == 4) {
10510 /* 4-byte counter */
10511 buf[j] = (u64) *offset;
10512 j++;
10513 continue;
10514 }
10515 /* 8-byte counter */
10516 buf[j] = HILO_U64(*offset, *(offset + 1));
10517 j++;
10518 }
10519 }
10520}
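/*
 * Editor's note (illustrative): the 8-byte counters above are stored as
 * two consecutive 32-bit words with the high word first, which is why
 * every *_hi entry in the tables carries size 8. HILO_U64(hi, lo) is
 * expected to rebuild the 64-bit value roughly as:
 *
 *	u32 hi = *offset;			// the *_hi word
 *	u32 lo = *(offset + 1);			// the matching *_lo word
 *	u64 val = (((u64)hi) << 32) + lo;	// reassembled counter
 *
 * while size-4 entries are plain 32-bit counters widened with a cast.
 */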
10521
10522static int bnx2x_phys_id(struct net_device *dev, u32 data)
10523{
10524 struct bnx2x *bp = netdev_priv(dev);
10525 int port = BP_PORT(bp);
10526 int i;
10527
10528 if (!netif_running(dev))
10529 return 0;
10530
10531 if (!bp->port.pmf)
10532 return 0;
10533
10534 if (data == 0)
10535 data = 2;
10536
10537 for (i = 0; i < (data * 2); i++) {
10538 if ((i % 2) == 0)
10539 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10540 bp->link_params.hw_led_mode,
10541 bp->link_params.chip_id);
10542 else
10543 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10544 bp->link_params.hw_led_mode,
10545 bp->link_params.chip_id);
10546
10547 msleep_interruptible(500);
10548 if (signal_pending(current))
10549 break;
10550 }
10551
10552 if (bp->link_vars.link_up)
10553 bnx2x_set_led(bp, port, LED_MODE_OPER,
10554 bp->link_vars.line_speed,
10555 bp->link_params.hw_led_mode,
10556 bp->link_params.chip_id);
10557
10558 return 0;
10559}
10560
10561static struct ethtool_ops bnx2x_ethtool_ops = {
10562 .get_settings = bnx2x_get_settings,
10563 .set_settings = bnx2x_set_settings,
10564 .get_drvinfo = bnx2x_get_drvinfo,
10565 .get_regs_len = bnx2x_get_regs_len,
10566 .get_regs = bnx2x_get_regs,
10567 .get_wol = bnx2x_get_wol,
10568 .set_wol = bnx2x_set_wol,
10569 .get_msglevel = bnx2x_get_msglevel,
10570 .set_msglevel = bnx2x_set_msglevel,
10571 .nway_reset = bnx2x_nway_reset,
10572 .get_link = bnx2x_get_link,
10573 .get_eeprom_len = bnx2x_get_eeprom_len,
10574 .get_eeprom = bnx2x_get_eeprom,
10575 .set_eeprom = bnx2x_set_eeprom,
10576 .get_coalesce = bnx2x_get_coalesce,
10577 .set_coalesce = bnx2x_set_coalesce,
10578 .get_ringparam = bnx2x_get_ringparam,
10579 .set_ringparam = bnx2x_set_ringparam,
10580 .get_pauseparam = bnx2x_get_pauseparam,
10581 .set_pauseparam = bnx2x_set_pauseparam,
10582 .get_rx_csum = bnx2x_get_rx_csum,
10583 .set_rx_csum = bnx2x_set_rx_csum,
10584 .get_tx_csum = ethtool_op_get_tx_csum,
10585 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10586 .set_flags = bnx2x_set_flags,
10587 .get_flags = ethtool_op_get_flags,
10588 .get_sg = ethtool_op_get_sg,
10589 .set_sg = ethtool_op_set_sg,
10590 .get_tso = ethtool_op_get_tso,
10591 .set_tso = bnx2x_set_tso,
10592 .self_test_count = bnx2x_self_test_count,
10593 .self_test = bnx2x_self_test,
10594 .get_strings = bnx2x_get_strings,
10595 .phys_id = bnx2x_phys_id,
10596 .get_stats_count = bnx2x_get_stats_count,
10597 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10598};
10599
10600/* end of ethtool_ops */
10601
10602/****************************************************************************
10603* General service functions
10604****************************************************************************/
10605
10606static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10607{
10608 u16 pmcsr;
10609
10610 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10611
10612 switch (state) {
10613 case PCI_D0:
10614 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10615 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10616 PCI_PM_CTRL_PME_STATUS));
10617
10618 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10619 /* delay required during transition out of D3hot */
10620 msleep(20);
10621 break;
10622
10623 case PCI_D3hot:
10624 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10625 pmcsr |= 3;
10626
10627 if (bp->wol)
10628 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10629
10630 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10631 pmcsr);
10632
10633 /* No more memory access after this point until
10634 * device is brought back to D0.
10635 */
10636 break;
10637
10638 default:
10639 return -EINVAL;
10640 }
10641 return 0;
10642}
10643
10644static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10645{
10646 u16 rx_cons_sb;
10647
10648 /* Tell compiler that status block fields can change */
10649 barrier();
10650 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10651 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10652 rx_cons_sb++;
10653 return (fp->rx_comp_cons != rx_cons_sb);
10654}
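/*
 * Editor's sketch (illustrative, assumed ring geometry): the "== MAX"
 * bump above exists because the last slot of each completion-queue page
 * is reserved for a next-page pointer rather than a real descriptor, so
 * a consumer index landing on it must hop forward before the comparison:
 */
static inline u16 skip_reserved_slot(u16 idx, u16 max_desc_cnt)
{
	/* max_desc_cnt == entries-per-page - 1, the reserved slot */
	if ((idx & max_desc_cnt) == max_desc_cnt)
		idx++;		/* step over the next-page element */
	return idx;
}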
10655
10656/*
10657 * net_device service functions
10658 */
10659
10660static int bnx2x_poll(struct napi_struct *napi, int budget)
10661{
10662 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10663 napi);
10664 struct bnx2x *bp = fp->bp;
10665 int work_done = 0;
10666
10667#ifdef BNX2X_STOP_ON_ERROR
10668 if (unlikely(bp->panic))
10669 goto poll_panic;
10670#endif
10671
10672 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10673 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10674
10675 bnx2x_update_fpsb_idx(fp);
10676
10677 if (bnx2x_has_rx_work(fp)) {
10678 work_done = bnx2x_rx_int(fp, budget);
10679
10680 /* must not complete if we consumed full budget */
10681 if (work_done >= budget)
10682 goto poll_again;
10683 }
10684
10685 /* bnx2x_has_rx_work() reads the status block, thus we need to
10686 * ensure that status block indices have been actually read
10687 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10688 * so that we won't write the "newer" value of the status block to IGU
10689 * (if there was a DMA right after bnx2x_has_rx_work and
10690 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10691 * may be postponed to right before bnx2x_ack_sb). In this case
10692 * there will never be another interrupt until there is another update
10693 * of the status block, while there is still unhandled work.
10694 */
10695 rmb();
10696
10697 if (!bnx2x_has_rx_work(fp)) {
10698#ifdef BNX2X_STOP_ON_ERROR
10699poll_panic:
10700#endif
10701 napi_complete(napi);
10702
10703 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10704 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10705 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10706 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10707 }
10708
10709poll_again:
10710 return work_done;
10711}
10712
10713
10714/* we split the first BD into headers and data BDs
10715 * to ease the pain of our fellow microcode engineers
10716 * we use one mapping for both BDs
10717 * So far this has only been observed to happen
10718 * in Other Operating Systems(TM)
10719 */
10720static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10721 struct bnx2x_fastpath *fp,
10722 struct sw_tx_bd *tx_buf,
10723 struct eth_tx_start_bd **tx_bd, u16 hlen,
10724 u16 bd_prod, int nbd)
10725{
10726 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10727 struct eth_tx_bd *d_tx_bd;
10728 dma_addr_t mapping;
10729 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10730
10731 /* first fix first BD */
10732 h_tx_bd->nbd = cpu_to_le16(nbd);
10733 h_tx_bd->nbytes = cpu_to_le16(hlen);
10734
10735 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10736 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10737 h_tx_bd->addr_lo, h_tx_bd->nbd);
10738
10739 /* now get a new data BD
10740 * (after the pbd) and fill it */
10741 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10742 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10743
10744 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10745 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10746
10747 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10748 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10749 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10750
10751 /* this marks the BD as one that has no individual mapping */
10752 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10753
10754 DP(NETIF_MSG_TX_QUEUED,
10755 "TSO split data size is %d (%x:%x)\n",
10756 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10757
10758 /* update tx_bd */
10759 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10760
10761 return bd_prod;
10762}
10763
10764static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10765{
10766 if (fix > 0)
10767 csum = (u16) ~csum_fold(csum_sub(csum,
10768 csum_partial(t_header - fix, fix, 0)));
10769
10770 else if (fix < 0)
10771 csum = (u16) ~csum_fold(csum_add(csum,
10772 csum_partial(t_header, -fix, 0)));
10773
10774 return swab16(csum);
10775}
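/*
 * Editor's sketch (illustrative, not driver code): bnx2x_csum_fix()
 * works because the Internet checksum is a one's-complement sum, so
 * bytes the hardware summed from the wrong starting point can simply be
 * subtracted back out (fix > 0) or added in (fix < 0) without walking
 * the packet again. A user-space demonstration of that identity, with
 * csum_acc() standing in for csum_partial():
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_acc(const uint8_t *p, size_t len)
{
	uint32_t acc = 0;

	for (; len > 1; p += 2, len -= 2)
		acc += (uint32_t)((p[0] << 8) | p[1]);
	if (len)
		acc += (uint32_t)(p[0] << 8);
	return acc;
}

static uint16_t csum_fold16(uint32_t acc)
{
	while (acc >> 16)		/* fold carries, csum_fold() style */
		acc = (acc & 0xffff) + (acc >> 16);
	return (uint16_t)acc;
}

int main(void)
{
	uint8_t pkt[12] = { 0xde, 0xad, 0xbe, 0xef, 1, 2, 3, 4, 5, 6, 7, 8 };
	uint32_t whole = csum_acc(pkt, 12);	/* hw summed 4 bytes too many */
	uint32_t extra = csum_acc(pkt, 4);	/* sum of the surplus prefix */

	/* subtracting the prefix sum == summing only the intended bytes */
	assert(csum_fold16(whole - extra) == csum_fold16(csum_acc(pkt + 4, 8)));
	printf("checksum fix-up identity holds\n");
	return 0;
}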
10776
10777static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10778{
10779 u32 rc;
10780
10781 if (skb->ip_summed != CHECKSUM_PARTIAL)
10782 rc = XMIT_PLAIN;
10783
10784 else {
10785 if (skb->protocol == htons(ETH_P_IPV6)) {
10786 rc = XMIT_CSUM_V6;
10787 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10788 rc |= XMIT_CSUM_TCP;
10789
10790 } else {
10791 rc = XMIT_CSUM_V4;
10792 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10793 rc |= XMIT_CSUM_TCP;
10794 }
10795 }
10796
10797 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10798 rc |= XMIT_GSO_V4;
10799
10800 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10801 rc |= XMIT_GSO_V6;
10802
10803 return rc;
10804}
10805
10806#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10807/* check if packet requires linearization (packet is too fragmented)
10808 no need to check fragmentation if page size > 8K (there will be no
10809 violation to FW restrictions) */
10810static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10811 u32 xmit_type)
10812{
10813 int to_copy = 0;
10814 int hlen = 0;
10815 int first_bd_sz = 0;
10816
10817 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10818 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10819
10820 if (xmit_type & XMIT_GSO) {
10821 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10822 /* Check if LSO packet needs to be copied:
10823 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10824 int wnd_size = MAX_FETCH_BD - 3;
10825 /* Number of windows to check */
10826 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10827 int wnd_idx = 0;
10828 int frag_idx = 0;
10829 u32 wnd_sum = 0;
10830
10831 /* Headers length */
10832 hlen = (int)(skb_transport_header(skb) - skb->data) +
10833 tcp_hdrlen(skb);
10834
10835 /* Amount of data (w/o headers) on linear part of SKB*/
10836 first_bd_sz = skb_headlen(skb) - hlen;
10837
10838 wnd_sum = first_bd_sz;
10839
10840 /* Calculate the first sum - it's special */
10841 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10842 wnd_sum +=
10843 skb_shinfo(skb)->frags[frag_idx].size;
10844
10845 /* If there was data on linear skb data - check it */
10846 if (first_bd_sz > 0) {
10847 if (unlikely(wnd_sum < lso_mss)) {
10848 to_copy = 1;
10849 goto exit_lbl;
10850 }
10851
10852 wnd_sum -= first_bd_sz;
10853 }
10854
10855 /* Others are easier: run through the frag list and
10856 check all windows */
10857 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10858 wnd_sum +=
10859 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10860
10861 if (unlikely(wnd_sum < lso_mss)) {
10862 to_copy = 1;
10863 break;
10864 }
10865 wnd_sum -=
10866 skb_shinfo(skb)->frags[wnd_idx].size;
10867 }
10868 } else {
10869 /* a non-LSO packet that is too fragmented
10870 must always be linearized */
10871 to_copy = 1;
10872 }
10873 }
10874
10875exit_lbl:
10876 if (unlikely(to_copy))
10877 DP(NETIF_MSG_TX_QUEUED,
10878 "Linearization IS REQUIRED for %s packet. "
10879 "num_frags %d hlen %d first_bd_sz %d\n",
10880 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10881 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10882
10883 return to_copy;
10884}
10885#endif
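/*
 * Editor's sketch (illustrative, simplified): the LSO branch above
 * slides a window of wnd_size consecutive BDs across the frag sizes and
 * requests skb_linearize() if any window can carry less than one MSS,
 * since the FW must always be able to build a full segment from that
 * many BDs. The same windowed-sum walk over made-up frag sizes:
 */
static int any_window_below_mss(const unsigned int *frag_size, int nfrags,
				int wnd_size, unsigned int mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < wnd_size && i < nfrags; i++)
		wnd_sum += frag_size[i];	/* seed the first window */

	for (i = 0; ; i++) {
		if (wnd_sum < mss)
			return 1;		/* linearization required */
		if (i + wnd_size >= nfrags)
			return 0;		/* all windows were OK */
		wnd_sum += frag_size[i + wnd_size];	/* slide right edge */
		wnd_sum -= frag_size[i];		/* drop left edge */
	}
}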
10886
10887/* called with netif_tx_lock
10888 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10889 * netif_wake_queue()
10890 */
10891static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10892{
10893 struct bnx2x *bp = netdev_priv(dev);
10894 struct bnx2x_fastpath *fp, *fp_stat;
10895 struct netdev_queue *txq;
10896 struct sw_tx_bd *tx_buf;
10897 struct eth_tx_start_bd *tx_start_bd;
10898 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10899 struct eth_tx_parse_bd *pbd = NULL;
10900 u16 pkt_prod, bd_prod;
10901 int nbd, fp_index;
10902 dma_addr_t mapping;
10903 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10904 int i;
10905 u8 hlen = 0;
10906 __le16 pkt_size = 0;
10907
10908#ifdef BNX2X_STOP_ON_ERROR
10909 if (unlikely(bp->panic))
10910 return NETDEV_TX_BUSY;
10911#endif
10912
10913 fp_index = skb_get_queue_mapping(skb);
10914 txq = netdev_get_tx_queue(dev, fp_index);
10915
10916 fp = &bp->fp[fp_index + bp->num_rx_queues];
10917 fp_stat = &bp->fp[fp_index];
10918
10919 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10920 fp_stat->eth_q_stats.driver_xoff++;
10921 netif_tx_stop_queue(txq);
10922 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10923 return NETDEV_TX_BUSY;
10924 }
10925
10926 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10927 " gso type %x xmit_type %x\n",
10928 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10929 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10930
10931#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10932 /* First, check if we need to linearize the skb (due to FW
10933 restrictions). No need to check fragmentation if page size > 8K
10934 (there will be no violation to FW restrictions) */
10935 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10936 /* Statistics of linearization */
10937 bp->lin_cnt++;
10938 if (skb_linearize(skb) != 0) {
10939 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10940 "silently dropping this SKB\n");
10941 dev_kfree_skb_any(skb);
10942 return NETDEV_TX_OK;
10943 }
10944 }
10945#endif
10946
10947 /*
10948 Please read carefully. First we use one BD which we mark as start,
10949 then we have a parsing info BD (used for TSO or xsum),
10950 and only then we have the rest of the TSO BDs.
10951 (don't forget to mark the last one as last,
10952 and to unmap only AFTER you write to the BD ...)
10953 And above all, all pbd sizes are in words - NOT DWORDS!
10954 */
10955
10956 pkt_prod = fp->tx_pkt_prod++;
10957 bd_prod = TX_BD(fp->tx_bd_prod);
10958
10959 /* get a tx_buf and first BD */
10960 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10961 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10962
10963 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10964 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10965 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10966 /* header nbd */
10967 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10968
10969 /* remember the first BD of the packet */
10970 tx_buf->first_bd = fp->tx_bd_prod;
10971 tx_buf->skb = skb;
10972 tx_buf->flags = 0;
10973
10974 DP(NETIF_MSG_TX_QUEUED,
10975 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10976 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10977
10978#ifdef BCM_VLAN
10979 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10980 (bp->flags & HW_VLAN_TX_FLAG)) {
10981 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10982 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10983 } else
10984#endif
10985 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10986
10987 /* turn on parsing and get a BD */
10988 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10989 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10990
10991 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10992
10993 if (xmit_type & XMIT_CSUM) {
10994 hlen = (skb_network_header(skb) - skb->data) / 2;
10995
10996 /* for now NS flag is not used in Linux */
10997 pbd->global_data =
10998 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10999 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
11000
11001 pbd->ip_hlen = (skb_transport_header(skb) -
11002 skb_network_header(skb)) / 2;
11003
11004 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
11005
11006 pbd->total_hlen = cpu_to_le16(hlen);
11007 hlen = hlen*2;
11008
11009 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
11010
11011 if (xmit_type & XMIT_CSUM_V4)
11012 tx_start_bd->bd_flags.as_bitfield |=
11013 ETH_TX_BD_FLAGS_IP_CSUM;
11014 else
11015 tx_start_bd->bd_flags.as_bitfield |=
11016 ETH_TX_BD_FLAGS_IPV6;
11017
11018 if (xmit_type & XMIT_CSUM_TCP) {
11019 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11020
11021 } else {
11022 s8 fix = SKB_CS_OFF(skb); /* signed! */
11023
11024 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
11025
11026 DP(NETIF_MSG_TX_QUEUED,
11027 "hlen %d fix %d csum before fix %x\n",
11028 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
11029
11030 /* HW bug: fixup the CSUM */
11031 pbd->tcp_pseudo_csum =
11032 bnx2x_csum_fix(skb_transport_header(skb),
11033 SKB_CS(skb), fix);
11034
11035 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11036 pbd->tcp_pseudo_csum);
11037 }
11038 }
11039
11040 mapping = pci_map_single(bp->pdev, skb->data,
11041 skb_headlen(skb), PCI_DMA_TODEVICE);
11042
11043 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11044 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11045 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11046 tx_start_bd->nbd = cpu_to_le16(nbd);
11047 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11048 pkt_size = tx_start_bd->nbytes;
11049
11050 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
11051 " nbytes %d flags %x vlan %x\n",
11052 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11053 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11054 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
11055
11056 if (xmit_type & XMIT_GSO) {
11057
11058 DP(NETIF_MSG_TX_QUEUED,
11059 "TSO packet len %d hlen %d total len %d tso size %d\n",
11060 skb->len, hlen, skb_headlen(skb),
11061 skb_shinfo(skb)->gso_size);
11062
11063 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
11064
11065 if (unlikely(skb_headlen(skb) > hlen))
11066 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11067 hlen, bd_prod, ++nbd);
11068
11069 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11070 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
11071 pbd->tcp_flags = pbd_tcp_flags(skb);
11072
11073 if (xmit_type & XMIT_GSO_V4) {
11074 pbd->ip_id = swab16(ip_hdr(skb)->id);
11075 pbd->tcp_pseudo_csum =
11076 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11077 ip_hdr(skb)->daddr,
11078 0, IPPROTO_TCP, 0));
11079
11080 } else
11081 pbd->tcp_pseudo_csum =
11082 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11083 &ipv6_hdr(skb)->daddr,
11084 0, IPPROTO_TCP, 0));
11085
11086 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11087 }
11088 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
11089
11090 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11091 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
11092
11093 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11094 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11095 if (total_pkt_bd == NULL)
11096 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11097
11098 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11099 frag->size, PCI_DMA_TODEVICE);
11100
11101 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11102 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11103 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11104 le16_add_cpu(&pkt_size, frag->size);
11105
11106 DP(NETIF_MSG_TX_QUEUED,
11107 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11108 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11109 le16_to_cpu(tx_data_bd->nbytes));
11110 }
11111
11112 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
11113
11114 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11115
11116 /* now send a tx doorbell, counting the next BD
11117 * if the packet contains or ends with it
11118 */
11119 if (TX_BD_POFF(bd_prod) < nbd)
11120 nbd++;
11121
11122 if (total_pkt_bd != NULL)
11123 total_pkt_bd->total_pkt_bytes = pkt_size;
11124
11125 if (pbd)
11126 DP(NETIF_MSG_TX_QUEUED,
11127 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11128 " tcp_flags %x xsum %x seq %u hlen %u\n",
11129 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11130 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
11131 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
11132
11133 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
11134
11135 /*
11136 * Make sure that the BD data is updated before updating the producer
11137 * since FW might read the BD right after the producer is updated.
11138 * This is only applicable for weak-ordered memory model archs such
11139 * as IA-64. The following barrier is also mandatory since the FW
11140 * will assume packets must have BDs.
11141 */
11142 wmb();
11143
11144 fp->tx_db.data.prod += nbd;
11145 barrier();
11146 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
11147
11148 mmiowb();
11149
11150 fp->tx_bd_prod += nbd;
11151
11152 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
11153 netif_tx_stop_queue(txq);
11154 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11155 if we put Tx into XOFF state. */
11156 smp_mb();
11157 fp_stat->eth_q_stats.driver_xoff++;
11158 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
11159 netif_tx_wake_queue(txq);
11160 }
11161 fp_stat->tx_pkt++;
11162
11163 return NETDEV_TX_OK;
11164}
11165
11166/* called with rtnl_lock */
11167static int bnx2x_open(struct net_device *dev)
11168{
11169 struct bnx2x *bp = netdev_priv(dev);
11170
11171 netif_carrier_off(dev);
11172
11173 bnx2x_set_power_state(bp, PCI_D0);
11174
11175 return bnx2x_nic_load(bp, LOAD_OPEN);
11176}
11177
11178/* called with rtnl_lock */
11179static int bnx2x_close(struct net_device *dev)
11180{
11181 struct bnx2x *bp = netdev_priv(dev);
11182
11183 /* Unload the driver, release IRQs */
11184 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11185 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11186 if (!CHIP_REV_IS_SLOW(bp))
11187 bnx2x_set_power_state(bp, PCI_D3hot);
11188
11189 return 0;
11190}
11191
11192/* called with netif_tx_lock from dev_mcast.c */
11193static void bnx2x_set_rx_mode(struct net_device *dev)
11194{
11195 struct bnx2x *bp = netdev_priv(dev);
11196 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11197 int port = BP_PORT(bp);
11198
11199 if (bp->state != BNX2X_STATE_OPEN) {
11200 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11201 return;
11202 }
11203
11204 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11205
11206 if (dev->flags & IFF_PROMISC)
11207 rx_mode = BNX2X_RX_MODE_PROMISC;
11208
11209 else if ((dev->flags & IFF_ALLMULTI) ||
11210 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11211 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11212
11213 else { /* some multicasts */
11214 if (CHIP_IS_E1(bp)) {
11215 int i, old, offset;
11216 struct dev_mc_list *mclist;
11217 struct mac_configuration_cmd *config =
11218 bnx2x_sp(bp, mcast_config);
11219
11220 for (i = 0, mclist = dev->mc_list;
11221 mclist && (i < dev->mc_count);
11222 i++, mclist = mclist->next) {
11223
11224 config->config_table[i].
11225 cam_entry.msb_mac_addr =
11226 swab16(*(u16 *)&mclist->dmi_addr[0]);
11227 config->config_table[i].
11228 cam_entry.middle_mac_addr =
11229 swab16(*(u16 *)&mclist->dmi_addr[2]);
11230 config->config_table[i].
11231 cam_entry.lsb_mac_addr =
11232 swab16(*(u16 *)&mclist->dmi_addr[4]);
11233 config->config_table[i].cam_entry.flags =
11234 cpu_to_le16(port);
11235 config->config_table[i].
11236 target_table_entry.flags = 0;
11237 config->config_table[i].target_table_entry.
11238 clients_bit_vector =
11239 cpu_to_le32(1 << BP_L_ID(bp));
11240 config->config_table[i].
11241 target_table_entry.vlan_id = 0;
11242
11243 DP(NETIF_MSG_IFUP,
11244 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11245 config->config_table[i].
11246 cam_entry.msb_mac_addr,
11247 config->config_table[i].
11248 cam_entry.middle_mac_addr,
11249 config->config_table[i].
11250 cam_entry.lsb_mac_addr);
11251 }
11252 old = config->hdr.length;
11253 if (old > i) {
11254 for (; i < old; i++) {
11255 if (CAM_IS_INVALID(config->
11256 config_table[i])) {
11257 /* already invalidated */
11258 break;
11259 }
11260 /* invalidate */
11261 CAM_INVALIDATE(config->
11262 config_table[i]);
11263 }
11264 }
11265
11266 if (CHIP_REV_IS_SLOW(bp))
11267 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11268 else
11269 offset = BNX2X_MAX_MULTICAST*(1 + port);
11270
11271 config->hdr.length = i;
11272 config->hdr.offset = offset;
11273 config->hdr.client_id = bp->fp->cl_id;
11274 config->hdr.reserved1 = 0;
11275
11276 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11277 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11278 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11279 0);
11280 } else { /* E1H */
11281 /* Accept one or more multicasts */
11282 struct dev_mc_list *mclist;
11283 u32 mc_filter[MC_HASH_SIZE];
11284 u32 crc, bit, regidx;
11285 int i;
11286
11287 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11288
11289 for (i = 0, mclist = dev->mc_list;
11290 mclist && (i < dev->mc_count);
11291 i++, mclist = mclist->next) {
11292
11293 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11294 mclist->dmi_addr);
11295
11296 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11297 bit = (crc >> 24) & 0xff;
11298 regidx = bit >> 5;
11299 bit &= 0x1f;
11300 mc_filter[regidx] |= (1 << bit);
11301 }
11302
11303 for (i = 0; i < MC_HASH_SIZE; i++)
11304 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11305 mc_filter[i]);
11306 }
11307 }
11308
11309 bp->rx_mode = rx_mode;
11310 bnx2x_set_storm_rx_mode(bp);
11311}
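/*
 * Editor's sketch (illustrative): the E1H branch above implements the
 * multicast filter as a 256-bin hash spread across the MC_HASH_SIZE
 * 32-bit registers. The top byte of each address's CRC32C picks the
 * bin, which then splits into a register index and a bit position:
 */
static void mc_hash_bin(u32 crc, int *regidx, int *bit)
{
	int b = (crc >> 24) & 0xff;	/* bin 0..255 from the CRC top byte */

	*regidx = b >> 5;		/* bin / 32: which hash register */
	*bit = b & 0x1f;		/* bin % 32: which bit inside it */
}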
11312
11313/* called with rtnl_lock */
11314static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11315{
11316 struct sockaddr *addr = p;
11317 struct bnx2x *bp = netdev_priv(dev);
11318
11319 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
11320 return -EINVAL;
11321
11322 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11323 if (netif_running(dev)) {
11324 if (CHIP_IS_E1(bp))
11325 bnx2x_set_mac_addr_e1(bp, 1);
11326 else
11327 bnx2x_set_mac_addr_e1h(bp, 1);
11328 }
11329
11330 return 0;
11331}
11332
11333/* called with rtnl_lock */
11334static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11335 int devad, u16 addr)
11336{
11337 struct bnx2x *bp = netdev_priv(netdev);
11338 u16 value;
11339 int rc;
11340 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11341
11342 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11343 prtad, devad, addr);
11344
11345 if (prtad != bp->mdio.prtad) {
11346 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11347 prtad, bp->mdio.prtad);
11348 return -EINVAL;
11349 }
11350
11351 /* The HW expects different devad if CL22 is used */
11352 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11353
11354 bnx2x_acquire_phy_lock(bp);
11355 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11356 devad, addr, &value);
11357 bnx2x_release_phy_lock(bp);
11358 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11359
11360 if (!rc)
11361 rc = value;
11362 return rc;
11363}
11364
11365/* called with rtnl_lock */
11366static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11367 u16 addr, u16 value)
11368{
11369 struct bnx2x *bp = netdev_priv(netdev);
11370 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11371 int rc;
11372
11373 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11374 " value 0x%x\n", prtad, devad, addr, value);
11375
11376 if (prtad != bp->mdio.prtad) {
11377 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11378 prtad, bp->mdio.prtad);
11379 return -EINVAL;
11380 }
11381
11382 /* The HW expects different devad if CL22 is used */
11383 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11384
11385 bnx2x_acquire_phy_lock(bp);
11386 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11387 devad, addr, value);
11388 bnx2x_release_phy_lock(bp);
11389 return rc;
11390}
11391
11392/* called with rtnl_lock */
11393static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11394{
11395 struct bnx2x *bp = netdev_priv(dev);
11396 struct mii_ioctl_data *mdio = if_mii(ifr);
11397
11398 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11399 mdio->phy_id, mdio->reg_num, mdio->val_in);
11400
11401 if (!netif_running(dev))
11402 return -EAGAIN;
11403
11404 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11405}
11406
11407/* called with rtnl_lock */
11408static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11409{
11410 struct bnx2x *bp = netdev_priv(dev);
11411 int rc = 0;
11412
11413 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11414 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11415 return -EINVAL;
11416
11417 /* This does not race with packet allocation
11418 * because the actual alloc size is
11419 * only updated as part of load
11420 */
11421 dev->mtu = new_mtu;
11422
11423 if (netif_running(dev)) {
11424 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11425 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11426 }
11427
11428 return rc;
11429}
11430
11431static void bnx2x_tx_timeout(struct net_device *dev)
11432{
11433 struct bnx2x *bp = netdev_priv(dev);
11434
11435#ifdef BNX2X_STOP_ON_ERROR
11436 if (!bp->panic)
11437 bnx2x_panic();
11438#endif
11439 /* This allows the netif to be shut down gracefully before resetting */
11440 schedule_work(&bp->reset_task);
11441}
11442
11443#ifdef BCM_VLAN
11444/* called with rtnl_lock */
11445static void bnx2x_vlan_rx_register(struct net_device *dev,
11446 struct vlan_group *vlgrp)
11447{
11448 struct bnx2x *bp = netdev_priv(dev);
11449
11450 bp->vlgrp = vlgrp;
11451
11452 /* Set flags according to the required capabilities */
11453 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11454
11455 if (dev->features & NETIF_F_HW_VLAN_TX)
11456 bp->flags |= HW_VLAN_TX_FLAG;
11457
11458 if (dev->features & NETIF_F_HW_VLAN_RX)
11459 bp->flags |= HW_VLAN_RX_FLAG;
11460
11461 if (netif_running(dev))
11462 bnx2x_set_client_config(bp);
11463}
11464
11465#endif
11466
11467#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11468static void poll_bnx2x(struct net_device *dev)
11469{
11470 struct bnx2x *bp = netdev_priv(dev);
11471
11472 disable_irq(bp->pdev->irq);
11473 bnx2x_interrupt(bp->pdev->irq, dev);
11474 enable_irq(bp->pdev->irq);
11475}
11476#endif
11477
11478static const struct net_device_ops bnx2x_netdev_ops = {
11479 .ndo_open = bnx2x_open,
11480 .ndo_stop = bnx2x_close,
11481 .ndo_start_xmit = bnx2x_start_xmit,
11482 .ndo_set_multicast_list = bnx2x_set_rx_mode,
11483 .ndo_set_mac_address = bnx2x_change_mac_addr,
11484 .ndo_validate_addr = eth_validate_addr,
11485 .ndo_do_ioctl = bnx2x_ioctl,
11486 .ndo_change_mtu = bnx2x_change_mtu,
11487 .ndo_tx_timeout = bnx2x_tx_timeout,
11488#ifdef BCM_VLAN
11489 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11490#endif
11491#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11492 .ndo_poll_controller = poll_bnx2x,
11493#endif
11494};
11495
11496static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11497 struct net_device *dev)
11498{
11499 struct bnx2x *bp;
11500 int rc;
11501
11502 SET_NETDEV_DEV(dev, &pdev->dev);
11503 bp = netdev_priv(dev);
11504
11505 bp->dev = dev;
11506 bp->pdev = pdev;
11507 bp->flags = 0;
11508 bp->func = PCI_FUNC(pdev->devfn);
11509
11510 rc = pci_enable_device(pdev);
11511 if (rc) {
11512 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11513 goto err_out;
11514 }
11515
11516 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11517 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11518 " aborting\n");
11519 rc = -ENODEV;
11520 goto err_out_disable;
11521 }
11522
11523 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11524 printk(KERN_ERR PFX "Cannot find second PCI device"
11525 " base address, aborting\n");
11526 rc = -ENODEV;
11527 goto err_out_disable;
11528 }
11529
11530 if (atomic_read(&pdev->enable_cnt) == 1) {
11531 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11532 if (rc) {
11533 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11534 " aborting\n");
11535 goto err_out_disable;
11536 }
11537
11538 pci_set_master(pdev);
11539 pci_save_state(pdev);
11540 }
11541
11542 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11543 if (bp->pm_cap == 0) {
11544 printk(KERN_ERR PFX "Cannot find power management"
11545 " capability, aborting\n");
11546 rc = -EIO;
11547 goto err_out_release;
11548 }
11549
11550 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11551 if (bp->pcie_cap == 0) {
11552 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11553 " aborting\n");
11554 rc = -EIO;
11555 goto err_out_release;
11556 }
11557
11558 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
11559 bp->flags |= USING_DAC_FLAG;
11560 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
11561 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11562 " failed, aborting\n");
11563 rc = -EIO;
11564 goto err_out_release;
11565 }
11566
11567 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
11568 printk(KERN_ERR PFX "System does not support DMA,"
11569 " aborting\n");
11570 rc = -EIO;
11571 goto err_out_release;
11572 }
11573
11574 dev->mem_start = pci_resource_start(pdev, 0);
11575 dev->base_addr = dev->mem_start;
11576 dev->mem_end = pci_resource_end(pdev, 0);
11577
11578 dev->irq = pdev->irq;
11579
11580 bp->regview = pci_ioremap_bar(pdev, 0);
11581 if (!bp->regview) {
11582 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11583 rc = -ENOMEM;
11584 goto err_out_release;
11585 }
11586
11587 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11588 min_t(u64, BNX2X_DB_SIZE,
11589 pci_resource_len(pdev, 2)));
11590 if (!bp->doorbells) {
11591 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11592 rc = -ENOMEM;
11593 goto err_out_unmap;
11594 }
11595
11596 bnx2x_set_power_state(bp, PCI_D0);
11597
11598 /* clean indirect addresses */
11599 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11600 PCICFG_VENDOR_ID_OFFSET);
11601 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11602 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11603 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11604 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11605
11606 dev->watchdog_timeo = TX_TIMEOUT;
11607
11608 dev->netdev_ops = &bnx2x_netdev_ops;
11609 dev->ethtool_ops = &bnx2x_ethtool_ops;
11610 dev->features |= NETIF_F_SG;
11611 dev->features |= NETIF_F_HW_CSUM;
11612 if (bp->flags & USING_DAC_FLAG)
11613 dev->features |= NETIF_F_HIGHDMA;
11614 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11615 dev->features |= NETIF_F_TSO6;
11616#ifdef BCM_VLAN
11617 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
11618 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11619
11620 dev->vlan_features |= NETIF_F_SG;
11621 dev->vlan_features |= NETIF_F_HW_CSUM;
11622 if (bp->flags & USING_DAC_FLAG)
11623 dev->vlan_features |= NETIF_F_HIGHDMA;
11624 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11625 dev->vlan_features |= NETIF_F_TSO6;
11626#endif
11627
11628 /* get_port_hwinfo() will set prtad and mmds properly */
11629 bp->mdio.prtad = MDIO_PRTAD_NONE;
11630 bp->mdio.mmds = 0;
11631 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11632 bp->mdio.dev = dev;
11633 bp->mdio.mdio_read = bnx2x_mdio_read;
11634 bp->mdio.mdio_write = bnx2x_mdio_write;
11635
11636 return 0;
11637
11638err_out_unmap:
11639 if (bp->regview) {
11640 iounmap(bp->regview);
11641 bp->regview = NULL;
11642 }
11643 if (bp->doorbells) {
11644 iounmap(bp->doorbells);
11645 bp->doorbells = NULL;
11646 }
11647
11648err_out_release:
11649 if (atomic_read(&pdev->enable_cnt) == 1)
11650 pci_release_regions(pdev);
11651
11652err_out_disable:
11653 pci_disable_device(pdev);
11654 pci_set_drvdata(pdev, NULL);
11655
11656err_out:
11657 return rc;
11658}
11659
11660static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11661{
11662 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11663
11664 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11665 return val;
11666}
11667
11668/* return value of 1=2.5GHz 2=5GHz */
11669static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11670{
11671 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11672
11673 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11674 return val;
11675}
11676static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11677{
11678 struct bnx2x_fw_file_hdr *fw_hdr;
11679 struct bnx2x_fw_file_section *sections;
11680 u16 *ops_offsets;
11681 u32 offset, len, num_ops;
11682 int i;
11683 const struct firmware *firmware = bp->firmware;
11684 const u8 * fw_ver;
11685
11686 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11687 return -EINVAL;
11688
11689 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11690 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11691
11692 /* Make sure none of the offsets and sizes make us read beyond
11693 * the end of the firmware data */
11694 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11695 offset = be32_to_cpu(sections[i].offset);
11696 len = be32_to_cpu(sections[i].len);
11697 if (offset + len > firmware->size) {
11698 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11699 return -EINVAL;
11700 }
11701 }
11702
11703 /* Likewise for the init_ops offsets */
11704 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11705 ops_offsets = (u16 *)(firmware->data + offset);
11706 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11707
11708 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11709 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11710 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11711 return -EINVAL;
11712 }
11713 }
11714
11715 /* Check FW version */
11716 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11717 fw_ver = firmware->data + offset;
11718 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11719 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11720 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11721 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11722 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11723 " Should be %d.%d.%d.%d\n",
11724 fw_ver[0], fw_ver[1], fw_ver[2],
11725 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11726 BCM_5710_FW_MINOR_VERSION,
11727 BCM_5710_FW_REVISION_VERSION,
11728 BCM_5710_FW_ENGINEERING_VERSION);
11729 return -EINVAL;
11730 }
11731
11732 return 0;
11733}
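/*
 * Editor's note (illustrative refinement, not in the driver): the bound
 * check above computes "offset + len > firmware->size", which in theory
 * can wrap on 32-bit math for a hostile header. An overflow-proof form
 * of the same test compares against the remaining space instead:
 */
static int section_in_bounds(u32 offset, u32 len, u32 blob_size)
{
	/* no u32 overflow: both comparisons stay within [0, blob_size] */
	return offset <= blob_size && len <= blob_size - offset;
}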
11734
11735static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11736{
11737 u32 i;
11738 const __be32 *source = (const __be32*)_source;
11739 u32 *target = (u32*)_target;
11740
11741 for (i = 0; i < n/4; i++)
11742 target[i] = be32_to_cpu(source[i]);
11743}
11744
11745/*
11746 Ops array is stored in the following format:
11747 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11748 */
11749static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11750{
11751 u32 i, j, tmp;
11752 const __be32 *source = (const __be32*)_source;
11753 struct raw_op *target = (struct raw_op*)_target;
11754
11755 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11756 tmp = be32_to_cpu(source[j]);
11757 target[i].op = (tmp >> 24) & 0xff;
11758 target[i].offset = tmp & 0xffffff;
11759 target[i].raw_data = be32_to_cpu(source[j+1]);
11760 }
11761}
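/*
 * Editor's sketch (illustrative): each 8-byte record in the ops blob is
 * {op(8 bits) | offset(24 bits)} followed by a 32-bit payload, all
 * big-endian on disk. Decoding a single record, mirroring the loop
 * above:
 */
static void decode_raw_op(const __be32 rec[2], struct raw_op *out)
{
	u32 tmp = be32_to_cpu(rec[0]);

	out->op = (tmp >> 24) & 0xff;		/* top byte: opcode */
	out->offset = tmp & 0xffffff;		/* low 24 bits: target */
	out->raw_data = be32_to_cpu(rec[1]);	/* second word: payload */
}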
11762static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11763{
11764 u32 i;
11765 u16 *target = (u16*)_target;
11766 const __be16 *source = (const __be16*)_source;
11767
11768 for (i = 0; i < n/2; i++)
11769 target[i] = be16_to_cpu(source[i]);
11770}
11771
11772#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11773 do { \
11774 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11775 bp->arr = kmalloc(len, GFP_KERNEL); \
11776 if (!bp->arr) { \
11777 printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
11778 goto lbl; \
11779 } \
11780 func(bp->firmware->data + \
11781 be32_to_cpu(fw_hdr->arr.offset), \
11782 (u8*)bp->arr, len); \
11783 } while (0)
11784
11785
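/*
 * Editor's note (illustrative expansion): for one concrete invocation,
 * BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
 * unfolds to roughly:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 *
 * i.e. allocate a host-order copy of the section, then convert it in
 * place with the supplied byte-swapping helper.
 */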
11786static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11787{
11788 char fw_file_name[40] = {0};
11789 int rc, offset;
11790 struct bnx2x_fw_file_hdr *fw_hdr;
11791
11792 /* Create a FW file name */
11793 if (CHIP_IS_E1(bp))
11794 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11795 else
11796 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11797
11798 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11799 BCM_5710_FW_MAJOR_VERSION,
11800 BCM_5710_FW_MINOR_VERSION,
11801 BCM_5710_FW_REVISION_VERSION,
11802 BCM_5710_FW_ENGINEERING_VERSION);
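	/* The resulting name has the form "bnx2x-e1-a.b.c.d.fw" or
	 * "bnx2x-e1h-a.b.c.d.fw", where a.b.c.d are the BCM_5710_FW_*_VERSION
	 * values above */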
11803
11804 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11805
11806 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11807 if (rc) {
11808 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11809 goto request_firmware_exit;
11810 }
11811
11812 rc = bnx2x_check_firmware(bp);
11813 if (rc) {
11814 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11815 goto request_firmware_exit;
11816 }
11817
11818 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11819
11820 /* Initialize the pointers to the init arrays */
11821 /* Blob */
11822 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11823
11824 /* Opcodes */
11825 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11826
11827 /* Offsets */
11828 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11829
11830 /* STORMs firmware */
11831 bp->tsem_int_table_data = bp->firmware->data +
11832 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11833 bp->tsem_pram_data = bp->firmware->data +
11834 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11835 bp->usem_int_table_data = bp->firmware->data +
11836 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11837 bp->usem_pram_data = bp->firmware->data +
11838 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11839 bp->xsem_int_table_data = bp->firmware->data +
11840 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11841 bp->xsem_pram_data = bp->firmware->data +
11842 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11843 bp->csem_int_table_data = bp->firmware->data +
11844 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11845 bp->csem_pram_data = bp->firmware->data +
11846 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11847
11848 return 0;
11849init_offsets_alloc_err:
11850 kfree(bp->init_ops);
11851init_ops_alloc_err:
11852 kfree(bp->init_data);
11853request_firmware_exit:
11854 release_firmware(bp->firmware);
11855
11856 return rc;
11857}
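
/* The error labels above unwind in reverse order of setup: a failed
 * init_ops_offsets allocation frees init_ops, a failed init_ops allocation
 * frees init_data, and every failure path releases the firmware blob
 * requested at the top of the function. */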
11858
11859
11860
11861static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11862 const struct pci_device_id *ent)
11863{
11864 static int version_printed;
11865 struct net_device *dev = NULL;
11866 struct bnx2x *bp;
25047950 11867 int rc;
11868
11869 if (version_printed++ == 0)
11870 printk(KERN_INFO "%s", version);
11871
11872 /* dev zeroed in init_etherdev */
555f6c78 11873 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
11874 if (!dev) {
11875 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 11876 return -ENOMEM;
34f80b04 11877 }
a2fbb9ea 11878
11879 bp = netdev_priv(dev);
11880 bp->msglevel = debug;
11881
34f80b04 11882 rc = bnx2x_init_dev(pdev, dev);
11883 if (rc < 0) {
11884 free_netdev(dev);
11885 return rc;
11886 }
11887
11888 pci_set_drvdata(pdev, dev);
11889
34f80b04 11890 rc = bnx2x_init_bp(bp);
11891 if (rc)
11892 goto init_one_exit;
11893
11894 /* Set init arrays */
11895 rc = bnx2x_init_firmware(bp, &pdev->dev);
11896 if (rc) {
11897 printk(KERN_ERR PFX "Error loading firmware\n");
11898 goto init_one_exit;
11899 }
11900
693fc0d1 11901 rc = register_netdev(dev);
34f80b04 11902 if (rc) {
693fc0d1 11903 dev_err(&pdev->dev, "Cannot register net device\n");
11904 goto init_one_exit;
11905 }
11906
25047950 11907 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11908 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11909 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
11910 bnx2x_get_pcie_width(bp),
11911 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11912 dev->base_addr, bp->pdev->irq);
e174961c 11913 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
c016201c 11914
a2fbb9ea 11915 return 0;
11916
11917init_one_exit:
11918 if (bp->regview)
11919 iounmap(bp->regview);
11920
11921 if (bp->doorbells)
11922 iounmap(bp->doorbells);
11923
11924 free_netdev(dev);
11925
11926 if (atomic_read(&pdev->enable_cnt) == 1)
11927 pci_release_regions(pdev);
11928
11929 pci_disable_device(pdev);
11930 pci_set_drvdata(pdev, NULL);
11931
11932 return rc;
11933}
11934
11935static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11936{
11937 struct net_device *dev = pci_get_drvdata(pdev);
11938 struct bnx2x *bp;
11939
11940 if (!dev) {
11941 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11942 return;
11943 }
228241eb 11944 bp = netdev_priv(dev);
a2fbb9ea 11945
11946 unregister_netdev(dev);
11947
11948 kfree(bp->init_ops_offsets);
11949 kfree(bp->init_ops);
11950 kfree(bp->init_data);
11951 release_firmware(bp->firmware);
11952
11953 if (bp->regview)
11954 iounmap(bp->regview);
11955
11956 if (bp->doorbells)
11957 iounmap(bp->doorbells);
11958
11959 free_netdev(dev);
11960
11961 if (atomic_read(&pdev->enable_cnt) == 1)
11962 pci_release_regions(pdev);
11963
11964 pci_disable_device(pdev);
11965 pci_set_drvdata(pdev, NULL);
11966}
11967
11968static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11969{
11970 struct net_device *dev = pci_get_drvdata(pdev);
11971 struct bnx2x *bp;
11972
11973 if (!dev) {
11974 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11975 return -ENODEV;
11976 }
11977 bp = netdev_priv(dev);
a2fbb9ea 11978
34f80b04 11979 rtnl_lock();
a2fbb9ea 11980
34f80b04 11981 pci_save_state(pdev);
228241eb 11982
11983 if (!netif_running(dev)) {
11984 rtnl_unlock();
11985 return 0;
11986 }
11987
11988 netif_device_detach(dev);
a2fbb9ea 11989
da5a662a 11990 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11991
a2fbb9ea 11992 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11993
11994 rtnl_unlock();
11995
11996 return 0;
11997}
11998
11999static int bnx2x_resume(struct pci_dev *pdev)
12000{
12001 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 12002 struct bnx2x *bp;
12003 int rc;
12004
12005 if (!dev) {
12006 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
12007 return -ENODEV;
12008 }
228241eb 12009 bp = netdev_priv(dev);
a2fbb9ea 12010
12011 rtnl_lock();
12012
228241eb 12013 pci_restore_state(pdev);
12014
12015 if (!netif_running(dev)) {
12016 rtnl_unlock();
12017 return 0;
12018 }
12019
12020 bnx2x_set_power_state(bp, PCI_D0);
12021 netif_device_attach(dev);
12022
da5a662a 12023 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 12024
12025 rtnl_unlock();
12026
12027 return rc;
12028}
12029
12030static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12031{
12032 int i;
12033
12034 bp->state = BNX2X_STATE_ERROR;
12035
12036 bp->rx_mode = BNX2X_RX_MODE_NONE;
12037
12038 bnx2x_netif_stop(bp, 0);
12039
12040 del_timer_sync(&bp->timer);
12041 bp->stats_state = STATS_STATE_DISABLED;
12042 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12043
12044 /* Release IRQs */
12045 bnx2x_free_irq(bp);
12046
12047 if (CHIP_IS_E1(bp)) {
12048 struct mac_configuration_cmd *config =
12049 bnx2x_sp(bp, mcast_config);
12050
8d9c5f34 12051 for (i = 0; i < config->hdr.length; i++)
12052 CAM_INVALIDATE(config->config_table[i]);
12053 }
12054
12055 /* Free SKBs, SGEs, TPA pool and driver internals */
12056 bnx2x_free_skbs(bp);
555f6c78 12057 for_each_rx_queue(bp, i)
f8ef6e44 12058 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 12059 for_each_rx_queue(bp, i)
7cde1c8b 12060 netif_napi_del(&bnx2x_fp(bp, i, napi));
12061 bnx2x_free_mem(bp);
12062
12063 bp->state = BNX2X_STATE_CLOSED;
12064
12065 netif_carrier_off(bp->dev);
12066
12067 return 0;
12068}
12069
12070static void bnx2x_eeh_recover(struct bnx2x *bp)
12071{
12072 u32 val;
12073
12074 mutex_init(&bp->port.phy_mutex);
12075
12076 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
12077 bp->link_params.shmem_base = bp->common.shmem_base;
12078 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
12079
12080 if (!bp->common.shmem_base ||
12081 (bp->common.shmem_base < 0xA0000) ||
12082 (bp->common.shmem_base >= 0xC0000)) {
12083 BNX2X_DEV_INFO("MCP not active\n");
12084 bp->flags |= NO_MCP_FLAG;
12085 return;
12086 }
12087
12088 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12089 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12090 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12091 BNX2X_ERR("BAD MCP validity signature\n");
12092
12093 if (!BP_NOMCP(bp)) {
12094 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
12095 & DRV_MSG_SEQ_NUMBER_MASK);
12096 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12097 }
12098}
12099
12100/**
12101 * bnx2x_io_error_detected - called when PCI error is detected
12102 * @pdev: Pointer to PCI device
12103 * @state: The current pci connection state
12104 *
12105 * This function is called after a PCI bus error affecting
12106 * this device has been detected.
12107 */
12108static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12109 pci_channel_state_t state)
12110{
12111 struct net_device *dev = pci_get_drvdata(pdev);
12112 struct bnx2x *bp = netdev_priv(dev);
12113
12114 rtnl_lock();
12115
12116 netif_device_detach(dev);
12117
12118 if (state == pci_channel_io_perm_failure) {
12119 rtnl_unlock();
12120 return PCI_ERS_RESULT_DISCONNECT;
12121 }
12122
493adb1f 12123 if (netif_running(dev))
f8ef6e44 12124 bnx2x_eeh_nic_unload(bp);
12125
12126 pci_disable_device(pdev);
12127
12128 rtnl_unlock();
12129
12130 /* Request a slot reset */
12131 return PCI_ERS_RESULT_NEED_RESET;
12132}
12133
12134/**
12135 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12136 * @pdev: Pointer to PCI device
12137 *
12138 * Restart the card from scratch, as if from a cold-boot.
12139 */
12140static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12141{
12142 struct net_device *dev = pci_get_drvdata(pdev);
12143 struct bnx2x *bp = netdev_priv(dev);
12144
12145 rtnl_lock();
12146
12147 if (pci_enable_device(pdev)) {
12148 dev_err(&pdev->dev,
12149 "Cannot re-enable PCI device after reset\n");
12150 rtnl_unlock();
12151 return PCI_ERS_RESULT_DISCONNECT;
12152 }
12153
12154 pci_set_master(pdev);
12155 pci_restore_state(pdev);
12156
12157 if (netif_running(dev))
12158 bnx2x_set_power_state(bp, PCI_D0);
12159
12160 rtnl_unlock();
12161
12162 return PCI_ERS_RESULT_RECOVERED;
12163}
12164
12165/**
12166 * bnx2x_io_resume - called when traffic can start flowing again
12167 * @pdev: Pointer to PCI device
12168 *
12169 * This callback is called when the error recovery driver tells us that
12170 * it's OK to resume normal operation.
12171 */
12172static void bnx2x_io_resume(struct pci_dev *pdev)
12173{
12174 struct net_device *dev = pci_get_drvdata(pdev);
12175 struct bnx2x *bp = netdev_priv(dev);
12176
12177 rtnl_lock();
12178
12179 bnx2x_eeh_recover(bp);
12180
493adb1f 12181 if (netif_running(dev))
f8ef6e44 12182 bnx2x_nic_load(bp, LOAD_NORMAL);
12183
12184 netif_device_attach(dev);
12185
12186 rtnl_unlock();
12187}
12188
12189static struct pci_error_handlers bnx2x_err_handler = {
12190 .error_detected = bnx2x_io_error_detected,
12191 .slot_reset = bnx2x_io_slot_reset,
12192 .resume = bnx2x_io_resume,
12193};
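
/* PCI error-recovery flow as the core drives these hooks: on a bus error,
 * bnx2x_io_error_detected() detaches the netdev and requests a slot reset;
 * bnx2x_io_slot_reset() re-enables and restores the device;
 * bnx2x_io_resume() then reloads the NIC and reattaches the netdev so
 * traffic can flow again. */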
12194
a2fbb9ea 12195static struct pci_driver bnx2x_pci_driver = {
12196 .name = DRV_MODULE_NAME,
12197 .id_table = bnx2x_pci_tbl,
12198 .probe = bnx2x_init_one,
12199 .remove = __devexit_p(bnx2x_remove_one),
12200 .suspend = bnx2x_suspend,
12201 .resume = bnx2x_resume,
12202 .err_handler = &bnx2x_err_handler,
12203};
12204
12205static int __init bnx2x_init(void)
12206{
12207 int ret;
12208
12209 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12210 if (bnx2x_wq == NULL) {
12211 printk(KERN_ERR PFX "Cannot create workqueue\n");
12212 return -ENOMEM;
12213 }
12214
12215 ret = pci_register_driver(&bnx2x_pci_driver);
12216 if (ret) {
12217 printk(KERN_ERR PFX "Cannot register driver\n");
12218 destroy_workqueue(bnx2x_wq);
12219 }
12220 return ret;
12221}
12222
12223static void __exit bnx2x_cleanup(void)
12224{
12225 pci_unregister_driver(&bnx2x_pci_driver);
12226
12227 destroy_workqueue(bnx2x_wq);
12228}
12229
12230module_init(bnx2x_init);
12231module_exit(bnx2x_cleanup);
12232
12233